diff --git a/keystone_plugin/tests/__init__.py b/.cicd/__init__.py similarity index 100% rename from keystone_plugin/tests/__init__.py rename to .cicd/__init__.py diff --git a/.cicd/resource_interface_mappings.py b/.cicd/resource_interface_mappings.py new file mode 100644 index 00000000..45ba3238 --- /dev/null +++ b/.cicd/resource_interface_mappings.py @@ -0,0 +1,113 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import connect + + +class InterfaceBase(object): + + type_name = None + client_name = None + get_method_name = None + delete_method_name = None + + def __init__(self, resource_id, client_config): + self.id = resource_id + self.client_config = client_config + + @property + def client(self): + openstack_connection = connect(**self.client_config) + return getattr(openstack_connection, self.client_name) + + def get(self): + get_method = getattr(self.client, self.get_method_name) + return get_method(self.id) + + def delete(self): + delete_method = getattr(self.client, self.delete_method_name) + return delete_method(self.id) + + +class Router(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.Router' + client_name = 'network' + get_method_name = 'get_router' + delete_method_name = 'delete_router' + + +class Network(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.Network' + client_name = 'network' + get_method_name = 'get_network' + delete_method_name = 'delete_network' + + +class Subnet(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.Subnet' + client_name = 'network' + get_method_name = 'get_subnet' + delete_method_name = 'delete_subnet' + + +class SecurityGroup(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.SecurityGroup' + client_name = 'network' + get_method_name = 'get_security_group' + delete_method_name = 'delete_security_group' + + +class Port(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.Port' + client_name = 'network' + get_method_name = 'get_port' + delete_method_name = 'delete_port' + + +class KeyPair(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.KeyPair' + client_name = 'compute' + get_method_name = 'get_keypair' + delete_method_name = 'delete_keypair' + + +class VolumeType(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.VolumeType' + client_name = 'block_storage' + get_method_name = 'get_type' + delete_method_name = 'delete_type' + + +class Server(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.Server' + client_name = 'compute' + get_method_name = 'get_server' + delete_method_name = 'delete_server' + + +class FloatingIP(InterfaceBase): + + type_name = 'cloudify.nodes.openstack.FloatingIP' + client_name = 'network' + get_method_name = 'get_ip' + delete_method_name = 'delete_ip' diff --git a/.cicd/test_local.py b/.cicd/test_local.py new file mode 100644 index 00000000..02ad159c --- /dev/null +++ b/.cicd/test_local.py @@ -0,0 +1,328 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd.
All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard Imports +from os import getenv +import time +import StringIO +import unittest + +# Third party imports +import openstack +import requests +import requests.exceptions +from fabric.api import settings as fabric_settings, run as fabric_run +from cloudify.workflows import local + +# Local imports +import resource_interface_mappings + + +IGNORED_LOCAL_WORKFLOW_MODULES = ( + 'worker_installer.tasks', + 'plugin_installer.tasks', + 'cloudify_agent.operations', + 'cloudify_agent.installer.operations', +) + +RETRY_MAX = 10 +RETRY_INT = 1 + + +class TestEnvironmentValidationError(Exception): + pass + + +class LiveUseCaseTests(unittest.TestCase): + """ Test a use case using a "local" Cloudify Workflow. + + Write a blueprint for a particular use case, + for example creating a port with allowed address pairs. + + You need the client config in your inputs, e.g.: + ```yaml + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + ``` + + To setup your test environment in PyCharm, + add the following environment variables: + + ```bash + openstack_username=.......;openstack_password=.........; + openstack_project_name=.....;openstack_region_name=RegionOne; + openstack_auth_url=https://....;= + ``` + + """ + + def setUp(self): + super(LiveUseCaseTests, self).setUp() + + @property + def client_config(self): + return { + 'auth_url': getenv('openstack_auth_url'), + 'username': getenv('openstack_username'), + 'password': getenv('openstack_password'), + 'project_name': getenv('openstack_project_name'), + 'region_name': getenv('openstack_region_name'), + } + + @staticmethod + def resolve_resource_interface(node_type): + try: + return getattr( + resource_interface_mappings, node_type.split('.')[-1]) + except AttributeError: + return None + + def get_resource_interfaces(self): + resource_interface_list = [] + for node_instance in self.cfy_local.storage.get_node_instances(): + node_template = \ + self.cfy_local.storage.get_node(node_instance.node_id) + resource_interface = \ + self.resolve_resource_interface( + node_template.type) + if not resource_interface: + continue + if node_template.properties['use_external_resource']: + continue + resource_identifier = \ + node_template.properties['resource_config'].get('name') or \ + node_template.properties['resource_config'].get('id') + if not resource_identifier: + raise Exception('Test blueprints must provide name or id.') + resource_interface_list.append( + resource_interface(resource_identifier, self.client_config)) + return resource_interface_list + + def verify_no_conflicting_resources(self): + """ This method checks that there are no conflicting resources in + Openstack before we run a test. + :return: Nothing. + :Raises Exception: Raises an exception if there are such resources. 
+ """ + for resource_interface in self.get_resource_interfaces(): + try: + conflicting_resource = resource_interface.get() + except openstack.exceptions.HttpException: + continue + raise TestEnvironmentValidationError( + 'Conflicting resource found {0}'.format(conflicting_resource)) + + def delete_all_resources(self): + """Deletes orphan resources in Openstack. + :return: Nothing. + """ + for resource_interface in self.get_resource_interfaces(): + try: + resource_interface.delete() + except openstack.exceptions.SDKException: + pass + + def initialize_local_blueprint(self): + self.cfy_local = local.init_env( + self.blueprint_path, + self.test_name, + inputs=self.inputs, + ignored_modules=IGNORED_LOCAL_WORKFLOW_MODULES) + self.verify_no_conflicting_resources() + + def install_blueprint(self, + task_retries=RETRY_MAX, + task_retry_interval=RETRY_INT): + + self.cfy_local.execute( + 'install', + task_retries=task_retries, + task_retry_interval=task_retry_interval) + + def uninstall_blueprint(self, + task_retries=RETRY_MAX, + task_retry_interval=RETRY_INT, + ignore_failure=False): + + if ignore_failure: + self.cfy_local.execute( + 'uninstall', + parameters={'ignore_failure': True}, + task_retries=task_retries, + task_retry_interval=task_retry_interval) + else: + self.cfy_local.execute( + 'uninstall', + task_retries=task_retries, + task_retry_interval=task_retry_interval) + + def cleanup_uninstall(self): + self.uninstall_blueprint(ignore_failure=True) + + def test_keypair_example(self, *_): + self.test_name = 'test_keypair_example' + self.blueprint_path = './examples/local/keypair.yaml' + self.inputs = dict(self.client_config) + self.initialize_local_blueprint() + self.install_blueprint() + self.uninstall_blueprint() + + def test_server_group_example(self, *_): + self.test_name = 'test_server_group_example' + self.blueprint_path = './examples/local/server_group.yaml' + self.inputs = dict(self.client_config) + self.initialize_local_blueprint() + self.install_blueprint() + self.uninstall_blueprint() + + # Requires Special Permissions + def test_volume_type_example(self, *_): + self.test_name = 'test_volume_type_example' + self.blueprint_path = './examples/local/volume_type.yaml' + self.inputs = dict(self.client_config) + self.initialize_local_blueprint() + # execute install workflow + self.cfy_local.execute( + 'install', + task_retries=30, + task_retry_interval=1) + # execute uninstall workflow + self.cfy_local.execute( + 'uninstall', + task_retries=30, + task_retry_interval=1) + + def test_network_example(self, *_): + self.test_name = 'test_network_example' + self.blueprint_path = './examples/local/network.yaml' + self.inputs = dict(self.client_config) + self.inputs.update( + { + 'example_subnet_cidr': '10.10.0.0/24', + 'example_fixed_ip': '10.10.0.11', + 'name_prefix': 'network_' + } + ) + self.initialize_local_blueprint() + self.install_blueprint() + self.uninstall_blueprint() + + def test_blueprint_example(self, *_): + self.test_name = 'test_blueprint_example' + self.blueprint_path = './examples/local/blueprint.yaml' + self.inputs = dict(self.client_config) + self.inputs.update( + { + 'external_network_id': 'dda079ce-12cf-4309-879a-8e67aec94de4', + 'example_subnet_cidr': '10.10.0.0/24', + 'name_prefix': 'blueprint_', + 'image_id': 'e41430f7-9131-495b-927f-e7dc4b8994c8', + 'flavor_id': '3', + 'agent_user': 'ubuntu' + } + ) + self.initialize_local_blueprint() + self.install_blueprint() + time.sleep(10) + private_key = StringIO.StringIO() + try: + server_floating_ip = \ + 
self.cfy_local.storage.get_node_instances( + 'example-floating_ip_address')[0] + server_key_instance = \ + self.cfy_local.storage.get_node_instances( + 'example-keypair')[0] + ip_address = \ + server_floating_ip.runtime_properties[ + 'floating_ip_address'] + private_key.write( + server_key_instance.runtime_properties['private_key']) + private_key.pos = 0 + except (KeyError, IndexError) as e: + raise Exception('Missing Runtime Property: {0}'.format(str(e))) + + with fabric_settings( + host_string=ip_address, + key=private_key.read(), + user=self.inputs.get('agent_user'), + abort_on_prompts=True): + fabric_run_output = fabric_run('last') + self.assertEqual(0, fabric_run_output.return_code) + + # execute uninstall workflow + self.uninstall_blueprint() + + def test_hello_world_example(self, *_): + self.addCleanup(self.cleanup_uninstall) + self.test_name = 'test_hello_world_example' + self.blueprint_path = \ + './examples/cloudify-hello-world-example/openstack.yaml' + self.inputs = dict(self.client_config) + self.inputs.update( + { + 'external_network_id': 'dda079ce-12cf-4309-879a-8e67aec94de4', + 'name_prefix': 'hello_world', + 'image': 'e41430f7-9131-495b-927f-e7dc4b8994c8', + 'flavor': '2', + } + ) + self.initialize_local_blueprint() + self.install_blueprint() + time.sleep(10) + + try: + server_floating_ip = \ + self.cfy_local.storage.get_node_instances('ip')[0] + ip_address = \ + server_floating_ip.runtime_properties[ + 'floating_ip_address'] + + # The Apache server installed on this blueprint's server can + # take up to 30 seconds to start responding, so we need to + # wait for it to come up before checking the response from + # the floating IP + timeout = 30 + current_time = time.time() + is_up = False + + while not is_up and time.time() <= timeout + current_time: + try: + response = requests.get('http://{0}'.format(ip_address)) + self.assertEqual(response.status_code, 200) + is_up = True + except requests.exceptions.ConnectionError: + pass + + if not is_up: + raise Exception( + 'Server is not responding,' + ' please check your blueprint configuration') + + except (KeyError, IndexError) as e: + raise Exception('Missing Runtime Property: {0}'.format(str(e))) + # execute uninstall workflow + self.uninstall_blueprint() diff --git a/.cicd/test_manager.py b/.cicd/test_manager.py new file mode 100644 index 00000000..1a9963de --- /dev/null +++ b/.cicd/test_manager.py @@ -0,0 +1,70 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +import os + +from integration_tests.tests.test_cases import PluginsTest +from integration_tests.tests import utils as test_utils + +PLUGIN_NAME = 'cloudify-openstacksdk-plugin' + + +class OpenstackPluginTestCase(PluginsTest): + + base_path = os.path.dirname(os.path.realpath(__file__)) + + @property + def plugin_root_directory(self): + return os.path.abspath(os.path.join(self.base_path, '..')) + + @property + def client_config(self): + return { + 'auth_url': os.getenv('openstack_auth_url'), + 'username': os.getenv('openstack_username'), + 'password': os.getenv('openstack_password'), + 'project_name': os.getenv('openstack_project_name'), + 'region_name': os.getenv('openstack_region_name'), + } + + def check_main_blueprint(self): + blueprint_id = 'manager_blueprint' + self.inputs = dict(self.client_config) + self.inputs.update( + { + 'external_network_id': os.getenv( + 'external_network_id', + 'dda079ce-12cf-4309-879a-8e67aec94de4'), + 'example_subnet_cidr': '10.10.0.0/24', + 'name_prefix': 'blueprint_', + 'image_id': 'e41430f7-9131-495b-927f-e7dc4b8994c8', + 'flavor_id': '3', + 'agent_user': 'ubuntu' + } + ) + dep, ex_id = self.deploy_application( + test_utils.get_resource( + os.path.join( + self.plugin_root_directory, + 'examples/manager/blueprint.yaml')), + timeout_seconds=200, + blueprint_id=blueprint_id, + deployment_id=blueprint_id, + inputs=self.inputs) + self.undeploy_application(dep.id) + + def test_blueprints(self): + self.upload_mock_plugin(PLUGIN_NAME, self.plugin_root_directory) + self.check_main_blueprint() diff --git a/.circleci/config.yml b/.circleci/config.yml index c147c76a..e5017da0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,4 @@ version: 2 - checkout: post: - > @@ -8,78 +7,35 @@ checkout: git fetch origin +refs/pull/$PR_ID/merge: git checkout -qf FETCH_HEAD fi - jobs: - unittests: docker: - image: circleci/python:2.7.15-stretch steps: - checkout - run: - name: Download pip - command: curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py" - - run: - name: Install pip - command: sudo python get-pip.py - - run: - name: Install virtualenv - command: pip install --user virtualenv - - run: - name: Init virtualenv - command: virtualenv env - - run: - name: install tox - command: pip install --user tox - - run: /home/circleci/.local/bin/tox -e flake8 - - run: /home/circleci/.local/bin/tox -e py27 - - wagon: - docker: - - image: amd64/centos:centos7.3.1611 - steps: - - checkout - - run: - name: Install dependencies - command: yum -y install python-devel gcc openssl git libxslt-devel libxml2-devel openldap-devel libffi-devel openssl-devel libvirt-devel - - run: - name: Download pip - command: curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py" - - run: - name: Install pip - command: python get-pip.py - - run: - name: Upgrade pip - command: pip install --upgrade pip==9.0.1 - - run: - name: Install virtualenv - command: pip install virtualenv - - run: - name: Init virtualenv - command: virtualenv env - - run: - name: Install wagon - command: pip install wagon==0.3.2 - - run: - name: many_linux - command: echo "manylinux1_compatible = False" > "env/bin/_manylinux.py" - - run: - name: make workspace - command: mkdir -p workspace/build - - run: - name: Create wagon - command: source env/bin/activate && wagon create -s . 
-v -o workspace/build -f -a '--no-cache-dir -c constraints.txt' - - persist_to_workspace: - root: workspace - paths: - - build/* - + name: Running Unit Tests + command: | + curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py" + sudo python get-pip.py + pip install --user virtualenv + virtualenv env + source env/bin/activate + pip install tox + tox -e flake8 + tox -e py27 workflows: version: 2 tests: jobs: - unittests - - wagon: - filters: - branches: - only: /([0-9\.]*\-build|master|dev)/ + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - dev + jobs: + - unittests diff --git a/.gitignore b/.gitignore index 8b0a2dc5..f274a24f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,26 +1,35 @@ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] +*$py.class # C extensions *.so # Distribution / packaging .Python -env/ -bin/ build/ develop-eggs/ dist/ +downloads/ eggs/ +.eggs/ lib/ lib64/ parts/ sdist/ var/ +wheels/ *.egg-info/ .installed.cfg *.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec # Installer logs pip-log.txt @@ -30,36 +39,71 @@ pip-delete-this-directory.txt htmlcov/ .tox/ .coverage -cover/ +.coverage.* .cache nosetests.xml coverage.xml +cover/ +*.cover +.hypothesis/ +.pytest_cache/ # Translations *.mo - -# Mr Developer -.mr.developer.cfg -.project -.pydevproject - -# Rope -.ropeproject +*.pot # Django stuff: *.log -*.pot +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy # Sphinx documentation docs/_build/ -*.iml +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py -*COMMIT_MSG +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject -# QuickBuild -.qbcache/ +# mkdocs documentation +/site -.idea/ +# mypy +.mypy_cache/ +.idea/* -.DS_Store \ No newline at end of file +.DS_Store +*.wgn diff --git a/CHANGELOG.txt b/CHANGELOG.txt index 098f0559..229dbc7e 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.txt @@ -1,3 +1,5 @@ +3.0.0: + - Openstack Plugin v3: new plugin based on openstacksdk official library (instead of CLI package), new types, new examples, new tests. 2.14.7: - Revert upgrade to OpenStack python clients to the old version used before 2.13.0 2.14.6: diff --git a/LICENSE b/LICENSE deleted file mode 100644 index e06d2081..00000000 --- a/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/Makefile b/Makefile deleted file mode 100644 index cfb7416f..00000000 --- a/Makefile +++ /dev/null @@ -1,39 +0,0 @@ -.PHONY: release install files test docs prepare publish - -all: - @echo "make release - prepares a release and publishes it" - @echo "make dev - prepares a development environment" - @echo "make install - install on local system" - @echo "make files - update changelog and todo files" - @echo "make test - run tox" - @echo "make docs - build docs" - @echo "prepare - prepare module for release (CURRENTLY IRRELEVANT)" - @echo "make publish - upload to pypi" - -release: test docs publish - -dev: - pip install -rdev-requirements.txt - python setup.py develop - -install: - python setup.py install - -files: - grep '# TODO' -rn * --exclude-dir=docs --exclude-dir=build --exclude=TODO.md | sed 's/: \+#/: # /g;s/:#/: # /g' | sed -e 's/^/- /' | grep -v Makefile > TODO.md - git log --oneline --decorate --color > CHANGELOG - -test: - pip install tox - tox - -docs: - pip install sphinx sphinx-rtd-theme - cd docs && make html - pandoc README.md -f markdown -t rst -s -o README.rst - -prepare: - python scripts/make-release.py - -publish: - python setup.py sdist upload \ No newline at end of file diff --git a/README.md b/README.md index 52867036..7059900e 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,3 @@ -cloudify-openstack-plugin -========================= - -[![Circle CI](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master.svg?style=shield)](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin/tree/master) -[![Build Status](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin.svg?branch=master)](https://travis-ci.org/cloudify-cosmo/cloudify-openstack-plugin) - -Cloudify OpenStack Plugin - -## Usage - -See [Openstack Plugin](https://docs.cloudify.co/latest/developer/official_plugins/openstack/) - - -## Known Issues - -You may experience such an error when using a local profile: - -```shell -ERROR:cloudify.cli.main:(PyYAML 3.10 (/.../python2.7/site-packages), Requirement.parse('PyYAML>=3.12'), set(['oslo.config'])) -``` - -Cloudify CLI requires PyYAML 3.10, whereas Openstack Python SDK Libraries require PyYAML 3.12. For this reason, if you wish to use Cloudify Openstack Plugin in a local profile, you will need to upgrade the PyYAML 3.12 in your virtualenv. - -Fix: - -```shell -pip install -U pyyaml==3.12 -``` - -At this stage, you should no longer use the flag `--install-plugins` with the `cfy` CLI. 
+[![Build Status](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin.svg?style=shield&circle-token=:circle-token)](https://circleci.com/gh/cloudify-cosmo/cloudify-openstack-plugin) + +# cloudify-openstack-plugin diff --git a/README.rst b/README.rst deleted file mode 100644 index eaa0de6e..00000000 --- a/README.rst +++ /dev/null @@ -1,4 +0,0 @@ -cloudify-openstack-plugin -========================= - -Cloudify OpenStack Plugin diff --git a/blueprints/boot-volumes.yaml b/blueprints/boot-volumes.yaml deleted file mode 100644 index 3558959d..00000000 --- a/blueprints/boot-volumes.yaml +++ /dev/null @@ -1,230 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-utilities-plugin - - plugin:cloudify-openstack-plugin - -inputs: - - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - external_network_name: - default: { get_secret: external_network_name } - - security_group_rules: - default: - - remote_ip_prefix: 0.0.0.0/0 - port: 22 - - volume1_use_existing: - default: false - - volume1_name: - default: volume1_name - - volume1_size: - default: 1 - - volume2_use_existing: - default: false - - volume2_name: - default: volume2_name - - volume2_size: - default: 1 - - image: - type: string - default: '' - - flavor: - type: string - default: 4 - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - -node_templates: - - volume_booted_server: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - agent_config: - install_method: none - server: {} - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - image: { get_input: image } - flavor: { get_input: flavor } - availability_zone: nova - block_device_mapping_v2: - - device_name: vdb - boot_index: "1" - uuid: { get_attribute: [volume1, external_id ] } - source_type: volume - volume_size: { get_property: [ volume1, volume, size ] } - delete_on_termination: false - - device_name: vdc - boot_index: "2" - uuid: { get_attribute: [volume2, external_id ] } - source_type: volume - volume_size: { get_property: [ volume2, volume, size ] } - delete_on_termination: false - relationships: - - target: boot_volume_example_key - type: cloudify.openstack.server_connected_to_keypair - - target: public_subnet_nic - type: cloudify.openstack.server_connected_to_port - - target: private_subnet_nic - type: cloudify.openstack.server_connected_to_port - - target: volume2 - type: cloudify.relationships.depends_on - - target: volume1 - type: cloudify.relationships.depends_on - - boot_volume_example_key: - type: cloudify.openstack.nodes.KeyPair - properties: - openstack_config: *openstack_config - resource_id: boot-volume-example-key - private_key_path: { concat: [ '~/.ssh/', { get_property: [ SELF, resource_id ] }, '.pem' ] } - - volume2: - type: cloudify.openstack.nodes.Volume - properties: - openstack_config: *openstack_config - use_external_resource: { get_input: volume2_use_existing } - resource_id: { get_input: volume2_name } - volume: - size: 
{ get_input: volume2_size } - - volume1: - type: cloudify.openstack.nodes.Volume - properties: - openstack_config: *openstack_config - use_external_resource: { get_input: volume1_use_existing } - resource_id: { get_input: volume1_name } - volume: - size: { get_input: volume1_size } - - public_subnet_nic: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: public_network - - type: cloudify.relationships.depends_on - target: public_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - type: cloudify.openstack.port_connected_to_floating_ip - target: public_ip - - private_subnet_nic: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - type: cloudify.relationships.depends_on - target: private_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - name: basic_security_group - rules: { get_input: security_group_rules } - - public_ip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_name: { get_property: [ external_network, resource_id ] } - - private_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - resource_id: private_subnet - subnet: - ip_version: 4 - cidr: 10.1.0.0/16 - relationships: - - target: private_network - type: cloudify.relationships.contained_in - - target: router - type: cloudify.openstack.subnet_connected_to_router - - private_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - resource_id: private_network - - public_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - resource_id: public_subnet - subnet: - ip_version: 4 - cidr: 10.0.0.0/16 - relationships: - - target: public_network - type: cloudify.relationships.contained_in - - target: router - type: cloudify.openstack.subnet_connected_to_router - - public_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - resource_id: public_network - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *openstack_config - resource_id: router - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: external_network_name } diff --git a/blueprints/flavor.yaml b/blueprints/flavor.yaml deleted file mode 100644 index df63fe0c..00000000 --- a/blueprints/flavor.yaml +++ /dev/null @@ -1,56 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - flavor: - 
default: - vcpus: 4 - ram: 4096 - disk: 40 - swap: 0 - ephemeral: 0 - is_public: false - - flavor_extra_spec: - default: - "hw:cpu_policy": 'dedicated' - "hw:cpu_threads_policy": 'isolate' - - flavor_tenants: - default: ['cfy_test_project'] - -dsl_definitions: - openstack_config: &openstack_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - -node_templates: - test_flavor: - type: cloudify.openstack.nodes.Flavor - properties: - flavor: { get_input: flavor } - extra_specs: { get_input: flavor_extra_spec } - tenants: { get_input: flavor_tenants } - resource_id: 'cfy_test_flavor' - openstack_config: *openstack_config diff --git a/blueprints/image-volume.yaml b/blueprints/image-volume.yaml deleted file mode 100644 index 59fb8f46..00000000 --- a/blueprints/image-volume.yaml +++ /dev/null @@ -1,262 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - external_network_name: - default: { get_secret: external_network_name } - - security_group_rules: - default: - - remote_ip_prefix: 0.0.0.0/0 - port: 22 - - volume1_use_existing: - default: false - - volume1_name: - default: volume1_name - - volume1_size: - default: 1 - - volume2_use_existing: - default: false - - volume2_name: - default: volume2_name - - volume2_size: - default: 1 - - image: - type: string - default: '' - - flavor: - type: string - default: 4 - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - -node_templates: - - # image_volume_server: - # type: cloudify.openstack.nodes.Server - # properties: - # openstack_config: *openstack_config - # agent_config: - # install_method: none - # server: {} - # interfaces: - # cloudify.interfaces.lifecycle: - # create: - # inputs: - # args: - # image: { get_input: image } - # flavor: { get_input: flavor } - # availability_zone: nova - # # block_device_mapping_v2: - # # - device_name: vda - # # boot_index: "0" - # # uuid: { get_attribute: [volume1, external_id ] } - # # source_type: volume - # # volume_size: { get_property: [ volume1, volume, size ] } - # # delete_on_termination: false - # # - device_name: vda - # # boot_index: "1" - # # uuid: { get_attribute: [volume2, external_id ] } - # # source_type: volume - # # volume_size: { get_property: [ volume2, volume, size ] } - # # delete_on_termination: false - # relationships: - # - target: image_volume_example_key - # type: cloudify.openstack.server_connected_to_keypair - # - target: public_subnet_nic - # type: cloudify.openstack.server_connected_to_port - # - target: private_subnet_nic - # type: cloudify.openstack.server_connected_to_port - # # - target: volume2 - # # type: cloudify.relationships.depends_on - # # - target: volume1 - # # type: cloudify.relationships.depends_on - - # image_volume_example_key: - # type: 
cloudify.openstack.nodes.KeyPair - # properties: - # openstack_config: *openstack_config - # resource_id: image-volume-example-key - # private_key_path: { concat: [ '~/.ssh/', { get_property: [ SELF, resource_id ] }, '.pem' ] } - - # volume2: - # type: cloudify.openstack.nodes.Volume - # properties: - # openstack_config: *openstack_config - # use_external_resource: { get_input: volume2_use_existing } - # resource_id: { get_input: volume2_name } - # volume: - # size: { get_input: volume2_size } - - # volume1: - # type: cloudify.openstack.nodes.Volume - # properties: - # openstack_config: *openstack_config - # use_external_resource: { get_input: volume1_use_existing } - # resource_id: { get_input: volume1_name } - # volume: - # size: { get_input: volume1_size } - - image: - type: cloudify.openstack.nodes.Image - properties: - image: - container_format: "bare" - disk_format: "qcow2" - openstack_config: *openstack_config - image_url: {get_input : image_url } - use_external_resource: true - create_if_missing: true - resource_id: { concat: [{get_input: vnf_name}, "-image"] } - - volume: - type: cloudify.openstack.nodes.Volume - properties: - openstack_config: *openstack_config - volume: - size: 60 - imageRef: 'create operation inputs override' - volume_type: { get_input: volume_type } - boot: true - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - size: { get_property: [ SELF, volume, size ] } - imageRef: { get_attribute: [ image, external_id ] } - volume_type: { get_property: [ SELF, volume, volume_type ] } - relationships: - - type: cloudify.relationships.depends_on - target: image - - public_subnet_nic: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: public_network - - type: cloudify.relationships.depends_on - target: public_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - type: cloudify.openstack.port_connected_to_floating_ip - target: public_ip - - private_subnet_nic: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - type: cloudify.relationships.depends_on - target: private_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - name: basic_security_group - rules: { get_input: security_group_rules } - - public_ip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_name: { get_property: [ external_network, resource_id ] } - - private_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - resource_id: private_subnet - subnet: - ip_version: 4 - cidr: 10.1.0.0/16 - relationships: - - target: private_network - type: cloudify.relationships.contained_in - - target: router - type: cloudify.openstack.subnet_connected_to_router - - private_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - resource_id: private_network - - public_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - resource_id: public_subnet - subnet: - ip_version: 4 - cidr: 10.0.0.0/16 - relationships: - - target: 
public_network - type: cloudify.relationships.contained_in - - target: router - type: cloudify.openstack.subnet_connected_to_router - - public_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - resource_id: public_network - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *openstack_config - resource_id: router - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: external_network_name } diff --git a/blueprints/inputs.yaml b/blueprints/inputs.yaml deleted file mode 100644 index f8cd00a3..00000000 --- a/blueprints/inputs.yaml +++ /dev/null @@ -1,6 +0,0 @@ -keystone_username: ***** -keystone_password: ***** -keystone_tenant_name: ***** -keystone_url: ***** -region: ***** -external_network_name: ***** diff --git a/blueprints/ipv6.yaml b/blueprints/ipv6.yaml deleted file mode 100644 index 76c91504..00000000 --- a/blueprints/ipv6.yaml +++ /dev/null @@ -1,176 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.4/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - description: OS_USERNAME as specified in Openstack RC file. - - password: - description: Openstack user password. - - tenant_name: - description: OS_TENANT_NAME as specified in Openstack RC file. - - auth_url: - description: OS_AUTH_URL as specified in Openstack RC file. - - region: - description: OS_REGION_NAME as specified in Openstack RC file. - - external_network_name: - description: Openstack tenant external network name. 
- - key_name: - type: string - - ipv4_nameservers: - default: - - 8.8.4.4 - - 8.8.8.8 - - ipv4_subnet_cidr: - default: 192.168.120.0/24 - - ipv4_allocation_pools: - default: - - start: 192.168.120.2 - end: 192.168.120.254 - - ipv6_nameservers: - default: - - 2001:4860:4860::8888 - - 2001:4860:4860::8844 - - ipv6_subnet_cidr: - default: 2605:1c00:50f2:2207::/64 - - ipv6_allocation_pools: - default: - - start: 2605:1c00:50f2:2207::64 - end: 2605:1c00:50f2:2207:ffff:ffff:ffff:ff - - large_image_flavor: - type: string - - cloudify_image_username: - default: centos - - centos_core_image: - type: string - -dsl_definitions: - - client_config: &client_config - username: { get_input: username } - password: { get_input: password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *client_config - use_external_resource: true - resource_id: { get_input: external_network_name } - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *client_config - relationships: - - type: cloudify.relationships.connected_to - target: external_network - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *client_config - - ipv4_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *client_config - subnet: - ip_version: 4 - cidr: { get_input: ipv4_subnet_cidr } - dns_nameservers: { get_input: ipv4_nameservers } - allocation_pools: { get_input: ipv4_allocation_pools } - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.openstack.subnet_connected_to_router - target: router - - ipv6_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *client_config - subnet: - ip_version: 6 - cidr: { get_input: ipv6_subnet_cidr } - dns_nameservers: { get_input: ipv6_nameservers } - allocation_pools: { get_input: ipv6_allocation_pools } - relationships: - - type: cloudify.relationships.contained_in - target: network - - cloudify_security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *client_config - rules: - - remote_ip_prefix: 0.0.0.0/0 - port_range_min: null - port_range_max: null - protocol: icmp - - remote_ip_prefix: 0.0.0.0/0 - port_range_min: 22 - port_range_max: 22 - protocol: tcp - - ipv4_port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *client_config - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.relationships.depends_on - target: ipv4_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: cloudify_security_group - - ipv6_port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *client_config - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.relationships.depends_on - target: ipv6_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: cloudify_security_group - - host: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *client_config - agent_config: - install_method: none - server: - key_name: { get_input: key_name } - image: { get_input: centos_core_image } - flavor: { get_input: large_image_flavor } - relationships: - - type: cloudify.openstack.server_connected_to_port - target: ipv4_port - - type: 
cloudify.openstack.server_connected_to_port - target: ipv6_port diff --git a/blueprints/network_rbac.yaml b/blueprints/network_rbac.yaml deleted file mode 100644 index b5d87750..00000000 --- a/blueprints/network_rbac.yaml +++ /dev/null @@ -1,98 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -description: > - Create an Example Openstack Network with RBAC policy - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - subnet_cidr: - type: string - default: '192.168.1.0/24' - - admin_project_id: - type: string - - rbac_policy_tenant_name: - type: string - -dsl_definitions: - openstack_config: &openstack_admin_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - insecure: true - custom_configuration: - keystone_client: - interface: public - - openstack_config: &openstack_tenant_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: rbac_policy_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - insecure: true - custom_configuration: - keystone_client: - interface: public - -node_templates: - network_rbac_policy: - type: cloudify.openstack.nodes.RBACPolicy - properties: - rbac_policy: - target_tenant: { get_input: admin_project_id } - action: access_as_shared - openstack_config: *openstack_admin_config - relationships: - - type: cloudify.openstack.rbac_policy_applied_to - target: network - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_tenant_config - - subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_tenant_config - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - ip_version: 4 - cidr: { get_input: subnet_cidr } - relationships: - - type: cloudify.relationships.contained_in - target: network - -outputs: - network: - value: { get_attribute: [ network, external_id ] } - subnet: - value: { get_attribute: [ subnet, external_id ] } - network_rbac_policy: - value: { get_attribute: [ network_rbac_policy, external_id ] } - diff --git a/blueprints/plugin_testing/use_external_admin_resources_test-blueprint.yaml b/blueprints/plugin_testing/use_external_admin_resources_test-blueprint.yaml deleted file mode 100644 index 7ca0ebc4..00000000 --- a/blueprints/plugin_testing/use_external_admin_resources_test-blueprint.yaml +++ /dev/null @@ -1,106 +0,0 @@ -#Blueprint used for testing 'use_external_resource' mode for these node_types: -# -#* cloudify.openstack.nodes.Flavor -#* cloudify.openstack.nodes.Image -#* cloudify.openstack.nodes.Project -#* cloudify.openstack.nodes.User -#* cloudify.openstack.nodes.HostAggregate -# - -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - openstack_username: - type: string - default: { get_secret: keystone_username } - - openstack_password: - type: string - default: { get_secret: keystone_password 
} - - openstack_auth_url: - type: string - default: { get_secret: keystone_url } - - openstack_region: - type: string - default: { get_secret: keystone_region } - - openstack_admin_tenant_name: # should admin_tenant name be required to provision new tenet / user ? - type: string - default: 'admin' - -dsl_definitions: - openstack_config: &openstack_config - username: { get_input: openstack_username } - password: { get_input: openstack_password } - tenant_name: { get_input: openstack_admin_tenant_name } - auth_url: { get_input: openstack_auth_url } - region: { get_input: openstack_region } - -node_templates: - test_user: - type: cloudify.openstack.nodes.User - properties: - use_external_resource: true - resource_id: 'openstack_plugin_test_user' - openstack_config: *openstack_config - - test_project: - type: cloudify.openstack.nodes.Project - properties: - use_external_resource: true - resource_id: 'openstack_plugin_test_project' - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.depends_on - target: test_user - - test_flavor: - type: cloudify.openstack.nodes.Flavor - properties: - use_external_resource: true - resource_id: 'openstack_plugin_test_flavor' - openstack_config: - username: { get_input: openstack_username } - password: { get_input: openstack_password } - auth_url: { get_input: openstack_auth_url } - region: { get_input: openstack_region } - tenant_name: { get_attribute: [test_project, external_name] } - relationships: - - type: cloudify.relationships.depends_on - target: test_project - - test_image: - type: cloudify.openstack.nodes.Image - properties: - use_external_resource: true - resource_id: 'openstack_plugin_test_image' - openstack_config: - username: { get_input: openstack_username } - password: { get_input: openstack_password } - auth_url: { get_input: openstack_auth_url } - region: { get_input: openstack_region } - tenant_name: { get_attribute: [test_project, external_name] } - relationships: - - type: cloudify.relationships.depends_on - target: test_project - - test_host_aggregate: - type: cloudify.openstack.nodes.HostAggregate - properties: - use_external_resource: true - resource_id: 'openstack_plugin_test_image' - openstack_config: - username: { get_input: openstack_username } - password: { get_input: openstack_password } - auth_url: { get_input: openstack_auth_url } - region: { get_input: openstack_region } - tenant_name: { get_attribute: [test_project, external_name] } - relationships: - - type: cloudify.relationships.depends_on - target: test_project diff --git a/blueprints/port-external-server.yaml b/blueprints/port-external-server.yaml deleted file mode 100644 index b389c059..00000000 --- a/blueprints/port-external-server.yaml +++ /dev/null @@ -1,145 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - - network_id: - type: string - - subnet_id: - type: string - - ubuntu_trusty_image: - type: string - - small_image_flavor: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - external_server: - type: 
cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - use_external_resource: true - agent_config: - install_method: none - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - resource_id: { get_attribute: [server, external_id]} - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - type: cloudify.openstack.server_connected_to_port - target: private_port - - type: cloudify.relationships.depends_on - target: server - - server: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - agent_config: - install_method: none - server: - key_name: '' - image: { get_input: ubuntu_trusty_image } - flavor: { get_input: small_image_flavor } - relationships: - - target: network - type: cloudify.relationships.contained_in - - target: port - type: cloudify.openstack.server_connected_to_port - - port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.relationships.depends_on - target: subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - private_port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.depends_on - target: private_network - - type: cloudify.relationships.depends_on - target: private_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: subnet_id } - relationships: - - type: cloudify.relationships.contained_in - target: network - - private_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - subnet: - ip_version: 4 - cidr: '10.10.131.0/24' - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - private_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: network_id } - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - description: My Test Security Group diff --git a/blueprints/port-fixed-ip.yaml b/blueprints/port-fixed-ip.yaml deleted file mode 100644 index 0041940d..00000000 --- a/blueprints/port-fixed-ip.yaml +++ /dev/null @@ -1,108 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - - network_id: - type: string - - subnet_id: - type: string - - fixed_ip: - type: string - - second_fixed_ip: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - assigned_port_ip: - type: cloudify.openstack.nodes.Port - properties: - 
openstack_config: *openstack_config - use_external_resource: true - port: - allowed_address_pairs: [ {"ip_address": { get_input: second_fixed_ip }}] - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - resource_id: { get_attribute: [port, external_id]} - relationships: - - type: cloudify.relationships.depends_on - target: port - - port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - fixed_ip: { get_input: fixed_ip } - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - type: cloudify.relationships.depends_on - target: private_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - # interfaces: - # cloudify.interfaces.lifecycle: - # create: - # inputs: - # args: - # name: os_port - # fixed_ips: - # - ip_address: { get_input: fixed_ip } - # subnet_id: { get_attribute: [ private_subnet, external_id ] } - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - name: test-security-group - description: My Test Security Group - - private_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: subnet_id } - relationships: - - type: cloudify.relationships.contained_in - target: private_network - - private_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: network_id } diff --git a/blueprints/port-security-group.yaml b/blueprints/port-security-group.yaml deleted file mode 100644 index 8d829d99..00000000 --- a/blueprints/port-security-group.yaml +++ /dev/null @@ -1,151 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - - external_network_name: - type: string - - network_id: - type: string - - subnet_id: - type: string - - ubuntu_trusty_image: - type: string - - small_image_flavor: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - server: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - agent_config: - install_method: none - server: - image: { get_input: ubuntu_trusty_image } - flavor: { get_input: small_image_flavor } - relationships: - - target: network - type: cloudify.relationships.contained_in - - target: key - type: cloudify.openstack.server_connected_to_keypair - - target: port - type: cloudify.openstack.server_connected_to_port - - key: - type: cloudify.openstack.nodes.KeyPair - properties: - openstack_config: *openstack_config - resource_id: example-key - private_key_path: { concat: [ '~/.ssh/', { get_property: [ SELF, resource_id ] }, '.pem' ] } - - port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.relationships.depends_on - target: subnet - - type: 
cloudify.openstack.port_connected_to_security_group - target: security_group - - type: cloudify.openstack.port_connected_to_floating_ip - target: ip - - ip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_name: { get_input: external_network_name } - relationships: - - type: cloudify.relationships.contained_in - target: network - - subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: subnet_id } - relationships: - - type: cloudify.relationships.contained_in - target: network - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: network_id } - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *openstack_config - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: external_network_name } - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - description: My Test Security Group - relationships: - - type: cloudify.relationships.contained_in - target: network - -groups: - - broken_scale_group: - members: [server, ip, security_group] - -policies: - - broken_scale_policy: - type: cloudify.policies.scaling - properties: - default_instances: 1 - targets: [broken_scale_group] diff --git a/blueprints/resource-id.yaml b/blueprints/resource-id.yaml deleted file mode 100644 index 7aaa5583..00000000 --- a/blueprints/resource-id.yaml +++ /dev/null @@ -1,51 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - - network_id: - type: string - - subnet_id: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.neutron_plugin.network.create - inputs: - resource_id: { get_input: network_id } diff --git a/blueprints/router.yaml b/blueprints/router.yaml deleted file mode 100644 index 78f89362..00000000 --- a/blueprints/router.yaml +++ /dev/null @@ -1,111 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - external_network_name: - description: Openstack 
tenant external network name. - -dsl_definitions: - openstack_config: &OPENSTACK_CONFIG - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - -node_templates: - - external_network: - type: cloudify.openstack.nodes.Network - properties: - use_external_resource: true - resource_id: { get_input: external_network_name } - openstack_config: *OPENSTACK_CONFIG - - network: - type: cloudify.openstack.nodes.Network - properties: - resource_id: 'ja-simple-vm-network' - openstack_config: *OPENSTACK_CONFIG - - network2: - type: cloudify.openstack.nodes.Network - properties: - resource_id: 'ja-simple-vm-network2' - openstack_config: *OPENSTACK_CONFIG - - external_router_add_route: - type: cloudify.openstack.nodes.Router - properties: - use_external_resource: false - resource_id: external_router_add_route - openstack_config: *OPENSTACK_CONFIG - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - subnet2: - type: cloudify.openstack.nodes.Subnet - properties: - resource_id: 'ja-simple-vm-subnet2' - subnet: - ip_version: 4 - cidr: 192.168.123.0/24 - dns_nameservers: [8.8.4.4, 8.8.8.8] - openstack_config: *OPENSTACK_CONFIG - relationships: - - target: network2 - type: cloudify.relationships.contained_in - - target: external_router_add_route - type: cloudify.openstack.subnet_connected_to_router - target_interfaces: - cloudify.interfaces.relationship_lifecycle: - unlink: - implementation: openstack.neutron_plugin.router.disconnect_subnet - - subnet: - type: cloudify.openstack.nodes.Subnet - properties: - resource_id: 'ja-simple-vm-subnet' - subnet: - ip_version: 4 - cidr: 10.0.0.0/24 - dns_nameservers: [8.8.4.4, 8.8.8.8] - openstack_config: *OPENSTACK_CONFIG - relationships: - - target: network - type: cloudify.relationships.contained_in - - target: subnet2 - type: cloudify.relationships.depends_on - - target: external_router_add_route - type: cloudify.openstack.subnet_connected_to_router - target_interfaces: - cloudify.interfaces.relationship_lifecycle: - establish: - implementation: openstack.neutron_plugin.router.update_routes - inputs: - args: - routes: - - destination: 10.0.0.0/24 - nexthop: 192.168.123.123 - unlink: - implementation: openstack.neutron_plugin.router.disconnect_subnet diff --git a/blueprints/routes.yaml b/blueprints/routes.yaml deleted file mode 100644 index a4c39e3f..00000000 --- a/blueprints/routes.yaml +++ /dev/null @@ -1,62 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - -dsl_definitions: - openstack_config: &OPENSTACK_CONFIG - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - -node_templates: - - router: - type: cloudify.openstack.nodes.Router - properties: - use_external_resource: true - resource_id: { get_secret: router_name } - openstack_config: *OPENSTACK_CONFIG - - routes_1: - type: 
cloudify.openstack.nodes.Routes - properties: - routes: - - destination: 192.168.121.0/24 - nexthop: 192.168.120.123 - openstack_config: *OPENSTACK_CONFIG - relationships: - - target: router - type: cloudify.openstack.route_connected_to_router - - routes_2: - type: cloudify.openstack.nodes.Routes - properties: - routes: - - destination: 10.10.0.0/16 - nexthop: 192.168.120.123 - openstack_config: *OPENSTACK_CONFIG - relationships: - - target: router - type: cloudify.openstack.route_connected_to_router diff --git a/blueprints/server-groups.yaml b/blueprints/server-groups.yaml deleted file mode 100644 index 52e7c1c3..00000000 --- a/blueprints/server-groups.yaml +++ /dev/null @@ -1,165 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -description: > - This blueprint provides the basic architecture for an Openstack blueprint. - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - keystone_username: - default: { get_secret: keystone_username } - - keystone_password: - default: { get_secret: keystone_password } - - keystone_tenant_name: - default: { get_secret: keystone_tenant_name } - - keystone_url: - default: { get_secret: keystone_url } - - region: - default: { get_secret: region } - - external_network_name: - default: { get_secret: external_network_name } - - router_name: - default: { get_secret: router_name } - - public_network_name: - default: { get_secret: public_network_name } - - public_subnet_name: - default: { get_secret: public_subnet_name } - - public_subnet_cidr: - default: 192.168.120.0/24 - - ubuntu_trusty_image: - default: { get_secret: ubuntu_trusty_image } - - small_image_flavor: - default: { get_secret: small_image_flavor } - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: keystone_username } - password: { get_input: keystone_password } - tenant_name: { get_input: keystone_tenant_name } - auth_url: { get_input: keystone_url } - region: { get_input: region } - custom_configuration: - nova_client: - version: '2.15' - -node_templates: - - server_group_member: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - agent_config: - install_method: none - server: - image: { get_input: ubuntu_trusty_image } - flavor: { get_input: small_image_flavor } - availability_zone: nova - relationships: - - target: example_key - type: cloudify.openstack.server_connected_to_keypair - - target: public_subnet_nic - type: cloudify.openstack.server_connected_to_port - - type: cloudify.openstack.server_connected_to_server_group - target: server_group - - server_group: - type: cloudify.openstack.nodes.ServerGroup - properties: - openstack_config: *openstack_config - # Another, better way of specifying. 
- # server_group: - # policies: - # - anti-affinity - policy: affinity - - example_key: - type: cloudify.openstack.nodes.KeyPair - properties: - openstack_config: *openstack_config - resource_id: example-key - private_key_path: { concat: [ '~/.ssh/', { get_property: [ SELF, resource_id ] }, '.pem' ] } - - public_subnet_nic: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - relationships: - - type: cloudify.relationships.contained_in - target: public_network - - type: cloudify.relationships.depends_on - target: public_subnet - - type: cloudify.openstack.port_connected_to_security_group - target: server_groups_example_security_group - - type: cloudify.openstack.port_connected_to_floating_ip - target: public_ip - - server_groups_example_security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - name: server_groups_example_security_group - rules: - - remote_ip_prefix: 0.0.0.0/0 - port: 22 - - public_ip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_name: { get_property: [ external_network, resource_id ] } - - public_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - subnet: - ip_version: 4 - cidr: { get_input: public_subnet_cidr } - # use_external_resource: true - # resource_id: { get_input: public_subnet_name } - relationships: - - target: public_network - type: cloudify.relationships.contained_in - - target: router - type: cloudify.openstack.subnet_connected_to_router - - public_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - # use_external_resource: true - # resource_id: { get_input: public_network_name } - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *openstack_config - # use_external_resource: true - # resource_id: { get_input: router_name } - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: external_network_name } diff --git a/blueprints/server-networks.yaml b/blueprints/server-networks.yaml deleted file mode 100644 index 15188b86..00000000 --- a/blueprints/server-networks.yaml +++ /dev/null @@ -1,154 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - - external_network_name: - type: string - - network_id: - type: string - - subnet_id: - type: string - - fixed_ip: - type: string - - ubuntu_trusty_image: - type: string - - small_image_flavor: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - server: - type: cloudify.openstack.nodes.Server - properties: - openstack_config: *openstack_config - agent_config: - install_method: none - server: - image: { get_input: ubuntu_trusty_image } - flavor: { get_input: small_image_flavor } - 
relationships: - - target: network - type: cloudify.relationships.contained_in - - target: key - type: cloudify.openstack.server_connected_to_keypair - - target: port - type: cloudify.openstack.server_connected_to_port - - key: - type: cloudify.openstack.nodes.KeyPair - properties: - openstack_config: *openstack_config - resource_id: example-key - private_key_path: { concat: [ '~/.ssh/', { get_property: [ SELF, resource_id ] }, '.pem' ] } - - port: - type: cloudify.openstack.nodes.Port - properties: - openstack_config: *openstack_config - fixed_ip: { get_input: fixed_ip } - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.relationships.depends_on - target: subnet - - type: cloudify.openstack.port_connected_to_security_group - target: security_group - - type: cloudify.openstack.port_connected_to_floating_ip - target: ip - - ip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_name: { get_input: external_network_name } - relationships: - - type: cloudify.relationships.contained_in - target: network - - subnet: - type: cloudify.openstack.nodes.Subnet - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: subnet_id } - relationships: - - type: cloudify.relationships.contained_in - target: network - - type: cloudify.openstack.subnet_connected_to_router - target: router - - network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: network_id } - - router: - type: cloudify.openstack.nodes.Router - properties: - openstack_config: *openstack_config - relationships: - - target: external_network - type: cloudify.relationships.connected_to - - external_network: - type: cloudify.openstack.nodes.Network - properties: - openstack_config: *openstack_config - use_external_resource: true - resource_id: { get_input: external_network_name } - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - openstack_config: *openstack_config - security_group: - description: My Test Security Group - -groups: - - heal_group: - members: [server, port, ip] - -policies: - - heal_policy: - type: cloudify.policies.scaling - properties: - default_instances: 1 - targets: [heal_group] diff --git a/blueprints/volume.yaml b/blueprints/volume.yaml deleted file mode 100644 index 0a88a5ea..00000000 --- a/blueprints/volume.yaml +++ /dev/null @@ -1,53 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.3.1/types.yaml - - plugin:cloudify-openstack-plugin - -inputs: - - username: - type: string - - keystone_password: - type: string - - tenant_name: - type: string - - auth_url: - type: string - - region: - type: string - -dsl_definitions: - - openstack_config: &openstack_config - username: { get_input: username } - password: { get_input: keystone_password } - tenant_name: { get_input: tenant_name } - auth_url: { get_input: auth_url } - region: { get_input: region } - -node_templates: - - volume: - type: cloudify.openstack.nodes.Volume - properties: - use_external_resource: true - # # resource_id: 1a914c31-a940-4352-9d17-cb1bffd1c086 - resource_id: 0000000-a940-4352-9d17-cb1bffd1c086 - create_if_missing: true - openstack_config: *openstack_config - volume: - size: 20 - interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: 
openstack.cinder_plugin.volume.create - inputs: - args: {} - status_attempts: 20 - status_timeout: 15 - resource_id: { get_property: [ SELF, resource_id ] } diff --git a/cinder_plugin/__init__.py b/cinder_plugin/__init__.py deleted file mode 100644 index a9dfcc44..00000000 --- a/cinder_plugin/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. diff --git a/cinder_plugin/tests/__init__.py b/cinder_plugin/tests/__init__.py deleted file mode 100644 index a9dfcc44..00000000 --- a/cinder_plugin/tests/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. diff --git a/cinder_plugin/tests/test_volume.py b/cinder_plugin/tests/test_volume.py deleted file mode 100644 index 66f58cfb..00000000 --- a/cinder_plugin/tests/test_volume.py +++ /dev/null @@ -1,660 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
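The volume.yaml blueprint removed above pairs `use_external_resource: true` with `create_if_missing: true`. A minimal sketch of the decision that combination implies is below, built only from python-cinderclient calls that already appear in this diff (`volumes.get`, `volumes.create`); the function name and signature are illustrative, since the plugin actually routes this through `openstack_plugin_common.use_external_resource`.

```python
from cinderclient import exceptions as cinder_exc


def resolve_volume(cinder_client, resource_id, use_external_resource,
                   create_if_missing, size):
    """Return an existing volume, or create one when that is allowed.

    Illustrative only; the real lookup logic lives in
    openstack_plugin_common.use_external_resource.
    """
    if use_external_resource:
        try:
            # resource_id is treated as a volume UUID here; a name lookup
            # would use volumes.list(search_opts={'name': resource_id}).
            return cinder_client.volumes.get(resource_id)
        except cinder_exc.NotFound:
            if not create_if_missing:
                raise
    return cinder_client.volumes.create(size=size, name=resource_id)
```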
- -import mock -import unittest - -from cloudify import mocks as cfy_mocks -from cloudify import exceptions as cfy_exc -from cloudify.state import current_ctx -from cinder_plugin import volume -from nova_plugin import server -from openstack_plugin_common import (OPENSTACK_AZ_PROPERTY, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_RESOURCE_PROPERTY) - - -class TestCinderVolume(unittest.TestCase): - - def _mock(self, **kwargs): - ctx = cfy_mocks.MockCloudifyContext(**kwargs) - current_ctx.set(ctx) - return ctx - - def tearDown(self): - current_ctx.clear() - - def test_create_new(self): - volume_name = 'fake volume name' - volume_description = 'fake volume' - volume_id = '00000000-0000-0000-0000-000000000000' - volume_size = 10 - - volume_properties = { - 'volume': { - 'size': volume_size, - 'description': volume_description - }, - 'use_external_resource': False, - 'device_name': '/dev/fake', - 'resource_id': volume_name, - } - - creating_volume_m = mock.Mock() - creating_volume_m.id = volume_id - creating_volume_m.bootable = False - creating_volume_m.status = volume.VOLUME_STATUS_CREATING - available_volume_m = mock.Mock() - available_volume_m.id = volume_id - available_volume_m.status = volume.VOLUME_STATUS_AVAILABLE - cinder_client_m = mock.Mock() - cinder_client_m.volumes = mock.Mock() - cinder_client_m.volumes.create = mock.Mock( - return_value=creating_volume_m) - cinder_client_m.volumes.get = mock.Mock( - return_value=available_volume_m) - ctx_m = self._mock(node_id='a', properties=volume_properties) - - volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m, - status_attempts=10, status_timeout=2) - - cinder_client_m.volumes.create.assert_called_once_with( - size=volume_size, - name=volume_name, - description=volume_description) - cinder_client_m.volumes.get.assert_called_once_with(volume_id) - self.assertEqual( - volume_id, - ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) - self.assertEqual( - volume.VOLUME_OPENSTACK_TYPE, - ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]) - self.assertFalse( - ctx_m.instance.runtime_properties[volume.VOLUME_BOOTABLE]) - - def test_create_use_existing(self): - volume_id = '00000000-0000-0000-0000-000000000000' - - volume_properties = { - 'use_external_resource': True, - 'device_name': '/dev/fake', - 'resource_id': volume_id, - } - existing_volume_m = mock.Mock() - existing_volume_m.id = volume_id - existing_volume_m.status = volume.VOLUME_STATUS_AVAILABLE - existing_volume_m.availability_zone = 'az' - cinder_client_m = mock.Mock() - cinder_client_m.volumes = mock.Mock() - cinder_client_m.volumes.create = mock.Mock() - cinder_client_m.cosmo_get_if_exists = mock.Mock( - return_value=existing_volume_m) - cinder_client_m.get_id_from_resource = mock.Mock( - return_value=volume_id) - ctx_m = self._mock(node_id='a', properties=volume_properties) - - volume.create(cinder_client=cinder_client_m, args={}, ctx=ctx_m, - status_attempts=10, status_timeout=2) - - self.assertFalse(cinder_client_m.volumes.create.called) - self.assertEqual( - volume_id, - ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) - self.assertEqual( - volume.VOLUME_OPENSTACK_TYPE, - ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]) - self.assertEqual( - ctx_m.instance.runtime_properties[OPENSTACK_AZ_PROPERTY], - 'az') - self.assertTrue( - ctx_m.instance.runtime_properties[OPENSTACK_RESOURCE_PROPERTY] - ) - - def test_delete(self): - volume_id = '00000000-0000-0000-0000-000000000000' - volume_name = 
'test-volume' - - volume_properties = { - 'use_external_resource': False, - } - - cinder_client_m = mock.Mock() - cinder_client_m.cosmo_delete_resource = mock.Mock() - cinder_client_m.volume_snapshots.list = mock.Mock(return_value=[]) - - ctx_m = self._mock(node_id='a', properties=volume_properties) - ctx_m.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = volume_id - ctx_m.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - volume.VOLUME_OPENSTACK_TYPE - ctx_m.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - volume_name - - volume.delete(cinder_client=cinder_client_m, ctx=ctx_m) - - cinder_client_m.cosmo_delete_resource.assert_called_once_with( - volume.VOLUME_OPENSTACK_TYPE, volume_id) - self.assertTrue( - OPENSTACK_ID_PROPERTY not in ctx_m.instance.runtime_properties) - self.assertTrue(OPENSTACK_TYPE_PROPERTY - not in ctx_m.instance.runtime_properties) - self.assertTrue(OPENSTACK_NAME_PROPERTY - not in ctx_m.instance.runtime_properties) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch.object(volume, 'wait_until_status', return_value=(None, True)) - def test_attach(self, wait_until_status_m, cinder_m, nova_m): - volume_id = '00000000-0000-0000-0000-000000000000' - server_id = '11111111-1111-1111-1111-111111111111' - device_name = '/dev/fake' - - volume_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {volume.DEVICE_NAME_PROPERTY: device_name} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - OPENSTACK_ID_PROPERTY: volume_id, - } - }) - }) - server_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - server.OPENSTACK_ID_PROPERTY: server_id - } - }) - }) - - ctx_m = self._mock(node_id='a', - target=server_ctx, - source=volume_ctx) - - nova_instance = nova_m.return_value - cinder_instance = cinder_m.return_value - - server.attach_volume(ctx=ctx_m, status_attempts=10, - status_timeout=2) - - nova_instance.volumes.create_server_volume.assert_called_once_with( - server_id, volume_id, device_name) - wait_until_status_m.assert_called_once_with( - cinder_client=cinder_instance, - volume_id=volume_id, - status=volume.VOLUME_STATUS_IN_USE, - num_tries=10, - timeout=2, - ) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - def _test_cleanup__after_attach_fails( - self, expected_err_cls, expect_cleanup, - wait_until_status_m, cinder_m, nova_m): - volume_id = '00000000-0000-0000-0000-000000000000' - server_id = '11111111-1111-1111-1111-111111111111' - attachment_id = '22222222-2222-2222-2222-222222222222' - device_name = '/dev/fake' - - attachment = {'id': attachment_id, - 'server_id': server_id, - 'volume_id': volume_id} - - volume_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {volume.DEVICE_NAME_PROPERTY: device_name} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - OPENSTACK_ID_PROPERTY: volume_id, - } - }) - }) - server_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - server.OPENSTACK_ID_PROPERTY: server_id - } - }) - }) - - ctx_m = self._mock(node_id='a', - target=server_ctx, - source=volume_ctx) - - attached_volume = mock.Mock(id=volume_id, - status=volume.VOLUME_STATUS_IN_USE, - 
attachments=[attachment]) - nova_instance = nova_m.return_value - cinder_instance = cinder_m.return_value - cinder_instance.volumes.get.return_value = attached_volume - - with self.assertRaises(expected_err_cls): - server.attach_volume(ctx=ctx_m, status_attempts=10, - status_timeout=2) - - nova_instance.volumes.create_server_volume.assert_called_once_with( - server_id, volume_id, device_name) - volume.wait_until_status.assert_any_call( - cinder_client=cinder_instance, - volume_id=volume_id, - status=volume.VOLUME_STATUS_IN_USE, - num_tries=10, - timeout=2, - ) - if expect_cleanup: - nova_instance.volumes.delete_server_volume.assert_called_once_with( - server_id, attachment_id) - self.assertEqual(2, volume.wait_until_status.call_count) - volume.wait_until_status.assert_called_with( - cinder_client=cinder_instance, - volume_id=volume_id, - status=volume.VOLUME_STATUS_AVAILABLE, - num_tries=10, - timeout=2) - - def test_cleanup_after_waituntilstatus_throws_recoverable_error(self): - err = cfy_exc.RecoverableError('Some recoverable error') - with mock.patch.object(volume, 'wait_until_status', - side_effect=[err, (None, True)]) as wait_mock: - self._test_cleanup__after_attach_fails(type(err), True, wait_mock) - - def test_cleanup_after_waituntilstatus_throws_any_not_nonrecov_error(self): - class ArbitraryNonRecoverableException(Exception): - pass - err = ArbitraryNonRecoverableException('An exception') - with mock.patch.object(volume, 'wait_until_status', - side_effect=[err, (None, True)]) as wait_mock: - self._test_cleanup__after_attach_fails(type(err), True, wait_mock) - - def test_cleanup_after_waituntilstatus_lets_nonrecov_errors_pass(self): - err = cfy_exc.NonRecoverableError('Some non recoverable error') - with mock.patch.object(volume, 'wait_until_status', - side_effect=[err, (None, True)]) as wait_mock: - self._test_cleanup__after_attach_fails(type(err), False, wait_mock) - - @mock.patch.object(volume, 'wait_until_status', return_value=(None, False)) - def test_cleanup_after_waituntilstatus_times_out(self, wait_mock): - self._test_cleanup__after_attach_fails(cfy_exc.RecoverableError, True, - wait_mock) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch.object(volume, 'wait_until_status', return_value=(None, True)) - def test_detach(self, wait_until_status_m, cinder_m, nova_m): - volume_id = '00000000-0000-0000-0000-000000000000' - server_id = '11111111-1111-1111-1111-111111111111' - attachment_id = '22222222-2222-2222-2222-222222222222' - - attachment = {'id': attachment_id, - 'server_id': server_id, - 'volume_id': volume_id} - - volume_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - OPENSTACK_ID_PROPERTY: volume_id, - } - }) - }) - server_ctx = cfy_mocks.MockContext({ - 'node': cfy_mocks.MockContext({ - 'properties': {} - }), - 'instance': cfy_mocks.MockContext({ - 'runtime_properties': { - server.OPENSTACK_ID_PROPERTY: server_id - } - }) - }) - - ctx_m = self._mock(node_id='a', - target=server_ctx, - source=volume_ctx) - - attached_volume = mock.Mock(id=volume_id, - status=volume.VOLUME_STATUS_IN_USE, - attachments=[attachment]) - nova_instance = nova_m.return_value - cinder_instance = cinder_m.return_value - cinder_instance.volumes.get.return_value = attached_volume - - server.detach_volume(ctx=ctx_m, status_attempts=10, status_timeout=2) - - 
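test_detach above drives nova_plugin.server.detach_volume against mocked Nova and Cinder clients, so the flow is only visible through the assertions that follow. As a hedged reconstruction from those assertions, not the plugin's actual implementation, the detach path amounts to one Nova call plus a Cinder status poll:

```python
import time


def detach_volume_sketch(nova_client, cinder_client, server_id, volume_id,
                         status_attempts=10, status_timeout=2):
    """Condensed sketch of the flow the surrounding test asserts."""
    vol = cinder_client.volumes.get(volume_id)
    attachment = next(
        (a for a in vol.attachments if a['server_id'] == server_id), None)
    if not attachment:
        return
    # Nova owns the attachment record, so the delete goes through Nova.
    nova_client.volumes.delete_server_volume(server_id, attachment['id'])
    # Cinder flips the volume back to 'available' once the detach completes.
    for _ in range(status_attempts):
        if cinder_client.volumes.get(volume_id).status == 'available':
            return
        time.sleep(status_timeout)
    raise RuntimeError('volume {0} did not detach in time'.format(volume_id))
```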
nova_instance.volumes.delete_server_volume.assert_called_once_with( - server_id, attachment_id) - volume.wait_until_status.assert_called_once_with( - cinder_client=cinder_instance, - volume_id=volume_id, - status=volume.VOLUME_STATUS_AVAILABLE, - num_tries=10, - timeout=2, - ) - - def _simple_volume_ctx(self): - volume_id = '1234-5678' - volume_ctx = cfy_mocks.MockCloudifyContext( - node_id="node_id", - node_name="node_name", - properties={}, - runtime_properties={ - OPENSTACK_ID_PROPERTY: volume_id, - } - ) - current_ctx.set(volume_ctx) - return volume_ctx, volume_id - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - def test_snapshot_create(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # Snapshot - cinder_instance.backups.create = mock.Mock() - cinder_instance.volume_snapshots.create = mock.Mock() - - volume.snapshot_create(ctx=volume_ctx, snapshot_name="snapshot_name", - snapshot_incremental=True, - snapshot_type="abc") - - cinder_instance.backups.create.assert_not_called() - cinder_instance.volume_snapshots.create.assert_called_once_with( - '1234-5678', description='abc', - force=True, metadata=None, - name='vol-1234-5678-snapshot_name') - - # Backup - cinder_instance.backups.create = mock.Mock() - cinder_instance.volume_snapshots.create = mock.Mock() - - volume.snapshot_create(ctx=volume_ctx, snapshot_name="backup_name", - snapshot_incremental=False) - - cinder_instance.backups.create.assert_called_once_with( - '1234-5678', - name='vol-1234-5678-backup_name') - cinder_instance.volume_snapshots.create.assert_not_called() - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_snapshot_delete(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # Snapshot - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - volume.snapshot_delete(ctx=volume_ctx, snapshot_name="snapshot_name", - snapshot_incremental=True) - - cinder_instance.backups.list.assert_not_called() - cinder_instance.volume_snapshots.list.assert_has_calls([ - mock.call(search_opts={ - 'display_name': 'vol-1234-5678-snapshot_name', - 'volume_id': '1234-5678'})]) - - # Backup - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - volume.snapshot_delete(ctx=volume_ctx, snapshot_name="backup_name", - snapshot_incremental=False) - - cinder_instance.backups.list.assert_has_calls([ - mock.call(search_opts={ - 'name': 'vol-1234-5678-backup_name', - 'volume_id': '1234-5678'})]) - cinder_instance.volume_snapshots.list.assert_not_called() - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - def test_snapshot_apply(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # Snapshot - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - volume.snapshot_apply(ctx=volume_ctx, snapshot_name="snapshot_name", - snapshot_incremental=True) - - cinder_instance.backups.list.assert_not_called() - cinder_instance.volume_snapshots.list.assert_not_called() - - # No such backup - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - with 
self.assertRaises(cfy_exc.NonRecoverableError): - volume.snapshot_apply(ctx=volume_ctx, snapshot_name="backup_name", - snapshot_incremental=False) - - cinder_instance.backups.list.assert_called_once_with( - search_opts={ - 'name': 'vol-1234-5678-backup_name', - 'volume_id': '1234-5678'}) - cinder_instance.volume_snapshots.list.assert_not_called() - - # backup exist - backup_mock = mock.Mock() - backup_mock.name = 'vol-1234-5678-backup_name' - backup_mock.id = 'backup_id' - cinder_instance.restores.restore = mock.Mock() - cinder_instance.backups.list = mock.Mock(return_value=[backup_mock]) - - volume.snapshot_apply(ctx=volume_ctx, snapshot_name="backup_name", - snapshot_incremental=False) - - cinder_instance.backups.list.assert_called_once_with( - search_opts={ - 'name': 'vol-1234-5678-backup_name', - 'volume_id': '1234-5678'}) - cinder_instance.restores.restore.assert_called_once_with( - 'backup_id', '1234-5678') - cinder_instance.volume_snapshots.list.assert_not_called() - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - def test_list_volumes(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - cinder_instance.volumes.list = mock.Mock(return_value=[]) - - volume.list_volumes(ctx=volume_ctx, args={"abc": "def"}) - - cinder_instance.volumes.list.assert_called_once_with(abc="def") - self.assertEqual( - {'external_id': '1234-5678', 'volume_list': []}, - volume_ctx.instance.runtime_properties - ) - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_delete_snapshot(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # remove any, nothing - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - volume._delete_snapshot(cinder_instance, {'volume_id': volume_id}) - - cinder_instance.backups.list.assert_not_called() - cinder_instance.volume_snapshots.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id})]) - - # remove any, but we have other snapshot - snapshot_mock = mock.Mock() - snapshot_mock.delete = mock.Mock() - snapshot_mock.name = 'snapshot_other' - snapshot_mock.id = 'snapshot_id' - - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock( - return_value=[snapshot_mock]) - - volume._delete_snapshot(cinder_instance, { - 'volume_id': volume_id, 'display_name': 'snapshot_name' - }) - - cinder_instance.backups.list.assert_not_called() - cinder_instance.volume_snapshots.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id, - 'display_name': 'snapshot_name'}), - mock.call(search_opts={'volume_id': volume_id, - 'display_name': 'snapshot_name'})]) - snapshot_mock.delete.assert_not_called() - - # can't delete snapshot - snapshot_mock = mock.Mock() - snapshot_mock.delete = mock.Mock() - snapshot_mock.name = 'snapshot_name' - snapshot_mock.id = 'snapshot_id' - snapshot_mock.status = 'available' - - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock( - return_value=[snapshot_mock]) - - volume_ctx.operation.retry = mock.Mock( - side_effect=cfy_exc.RecoverableError()) - with self.assertRaises(cfy_exc.RecoverableError): - volume._delete_snapshot(cinder_instance, { - 'volume_id': volume_id, 'display_name': 'snapshot_name' - }) - - cinder_instance.backups.list.assert_not_called() - 
volume_ctx.operation.retry.assert_called_with( - message='snapshot_name is still alive', retry_after=30) - cinder_instance.volume_snapshots.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id, - 'display_name': 'snapshot_name'}), - mock.call(search_opts={'volume_id': volume_id, - 'display_name': 'snapshot_name'})]) - snapshot_mock.delete.assert_called_once_with() - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_delete_backup(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # remove any, nothing - cinder_instance.backups.list = mock.Mock(return_value=[]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - volume._delete_backup(cinder_instance, {'volume_id': volume_id}) - - cinder_instance.backups.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id})]) - cinder_instance.volume_snapshots.list.assert_not_called() - - # remove any, but we have other backup - backup_mock = mock.Mock() - backup_mock.delete = mock.Mock() - backup_mock.name = 'backup_other' - backup_mock.id = 'backup_id' - backup_mock.status = 'available' - - cinder_instance.backups.list = mock.Mock(return_value=[backup_mock]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - volume._delete_backup(cinder_instance, {'volume_id': volume_id, - 'name': 'backup_name'}) - - cinder_instance.backups.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id, - 'name': 'backup_name'}), - mock.call(search_opts={'volume_id': volume_id, - 'name': 'backup_name'})]) - cinder_instance.volume_snapshots.list.assert_not_called() - backup_mock.delete.assert_not_called() - - # can't delete snapshot - backup_mock = mock.Mock() - backup_mock.delete = mock.Mock() - backup_mock.name = 'backup_name' - backup_mock.id = 'backup_id' - backup_mock.status = 'available' - - cinder_instance.backups.list = mock.Mock(return_value=[backup_mock]) - cinder_instance.volume_snapshots.list = mock.Mock(return_value=[]) - - volume_ctx.operation.retry = mock.Mock( - side_effect=cfy_exc.RecoverableError()) - with self.assertRaises(cfy_exc.RecoverableError): - volume._delete_backup(cinder_instance, {'volume_id': volume_id, - 'name': 'backup_name'}) - - cinder_instance.backups.list.assert_has_calls([ - mock.call(search_opts={'volume_id': volume_id, - 'name': 'backup_name'}), - mock.call(search_opts={'volume_id': volume_id, - 'name': 'backup_name'})]) - cinder_instance.volume_snapshots.list.assert_not_called() - backup_mock.delete.assert_called_once_with() - - @mock.patch('openstack_plugin_common.CinderClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_wait_until_status(self, cinder_m): - cinder_instance = cinder_m.return_value - volume_ctx, volume_id = self._simple_volume_ctx() - - # ready by first call - volume_mock = mock.Mock() - volume_mock.status = "ready" - cinder_instance.volumes.get = mock.Mock(return_value=volume_mock) - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=volume_ctx): - volume.wait_until_status(volume_id=volume_id, status='ready', - num_tries=1, timeout=1) - cinder_instance.volumes.get.assert_called_once_with(volume_id) - - # unready by first call - volume_mock = mock.Mock() - volume_mock.status = "unready" - cinder_instance.volumes.get = mock.Mock(return_value=volume_mock) - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=volume_ctx): - 
self.assertEqual( - volume.wait_until_status(volume_id=volume_id, status='ready', - num_tries=2, timeout=1), - (volume_mock, False) - ) - cinder_instance.volumes.get.assert_has_calls([ - mock.call(volume_id), - mock.call(volume_id)]) - - # volume error - volume_mock = mock.Mock() - volume_mock.status = volume.VOLUME_STATUS_ERROR - cinder_instance.volumes.get = mock.Mock(return_value=volume_mock) - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=volume_ctx): - with self.assertRaises(cfy_exc.NonRecoverableError): - volume.wait_until_status(volume_id=volume_id, status='ready', - num_tries=2, timeout=1) - cinder_instance.volumes.get.assert_called_once_with(volume_id) diff --git a/cinder_plugin/volume.py b/cinder_plugin/volume.py deleted file mode 100644 index 1b487234..00000000 --- a/cinder_plugin/volume.py +++ /dev/null @@ -1,299 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import time - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify import exceptions as cfy_exc - -from openstack_plugin_common import (delete_resource_and_runtime_properties, - with_cinder_client, - use_external_resource, - validate_resource, - add_list_to_runtime_properties, - create_object_dict, - get_openstack_id, - COMMON_RUNTIME_PROPERTIES_KEYS, - OPENSTACK_AZ_PROPERTY, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_NAME_PROPERTY) -from glance_plugin.image import handle_image_from_relationship - -VOLUME_STATUS_CREATING = 'creating' -VOLUME_STATUS_DELETING = 'deleting' -VOLUME_STATUS_AVAILABLE = 'available' -VOLUME_STATUS_IN_USE = 'in-use' -VOLUME_STATUS_ERROR = 'error' -VOLUME_STATUS_ERROR_DELETING = 'error_deleting' -VOLUME_ERROR_STATUSES = (VOLUME_STATUS_ERROR, VOLUME_STATUS_ERROR_DELETING) - -# Note: The 'device_name' property should actually be a property of the -# relationship between a server and a volume; It'll move to that -# relationship type once relationship properties are better supported. 
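Before the DEVICE_NAME_PROPERTY constant below, it may help to see how device_name is consumed. The sketch is a hedged reconstruction from test_attach earlier in this diff, reusing the wait_until_status helper and VOLUME_STATUS_IN_USE constant defined later in this module; it is not the actual nova_plugin.server.attach_volume code, and the default device name is only a placeholder.

```python
def attach_volume_sketch(nova_client, cinder_client, server_id, volume_id,
                         device_name='/dev/vdb',
                         status_attempts=10, status_timeout=2):
    """Illustrative attach flow: one Nova call, then a Cinder status poll."""
    # Nova creates the attachment and exposes the block device to the guest
    # under device_name (hypervisors may still rename it).
    nova_client.volumes.create_server_volume(server_id, volume_id, device_name)
    # Cinder reports the volume as 'in-use' once the attachment completes.
    _, ready = wait_until_status(cinder_client=cinder_client,
                                 volume_id=volume_id,
                                 status=VOLUME_STATUS_IN_USE,
                                 num_tries=status_attempts,
                                 timeout=status_timeout)
    return ready
```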
-DEVICE_NAME_PROPERTY = 'device_name' - -VOLUME_OPENSTACK_TYPE = 'volume' -VOLUME_OPENSTACK_ID_KEY = 'name' -VOLUME_BOOTABLE = 'bootable' - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -def _set_volume_runtime_properties(volume): - - try: - ctx.instance.runtime_properties[OPENSTACK_AZ_PROPERTY] = \ - volume.availability_zone - except AttributeError: - ctx.logger.error('Volume availability_zone not found.') - - try: - ctx.instance.runtime_properties[VOLUME_BOOTABLE] = \ - volume.bootable - except AttributeError: - if ctx.node.properties.get('boot', False): - ctx.instance.runtime_properties[VOLUME_BOOTABLE] = True - else: - ctx.instance.runtime_properties[VOLUME_BOOTABLE] = False - - -@operation -@with_cinder_client -def create(cinder_client, - args={}, - status_timeout=15, - status_attempts=20, - **kwargs): - - external_volume = use_external_resource( - ctx, cinder_client, VOLUME_OPENSTACK_TYPE, VOLUME_OPENSTACK_ID_KEY) - - if external_volume: - _set_volume_runtime_properties(external_volume) - return - - volume_dict = create_object_dict(ctx, VOLUME_OPENSTACK_TYPE, args, {}) - handle_image_from_relationship(volume_dict, 'imageRef', ctx) - - v = cinder_client.volumes.create(**volume_dict) - - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = v.id - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - VOLUME_OPENSTACK_TYPE - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - volume_dict[VOLUME_OPENSTACK_ID_KEY] - wait_until_status(cinder_client=cinder_client, - volume_id=v.id, - status=VOLUME_STATUS_AVAILABLE, - num_tries=status_attempts, - timeout=status_timeout, - ) - _set_volume_runtime_properties(v) - - -def _delete_snapshot(cinder_client, search_opts): - snapshots = cinder_client.volume_snapshots.list(search_opts=search_opts) - # no snapshots - if not snapshots: - return - - for snapshot in snapshots: - ctx.logger.debug("Check snapshot before delete: {}:{} with state {}" - .format(snapshot.id, snapshot.name, snapshot.status)) - if search_opts.get('display_name'): - if snapshot.name != search_opts['display_name']: - continue - if snapshot.status == 'available': - snapshot.delete() - - # wait 10 seconds before next check - time.sleep(10) - - snapshots = cinder_client.volume_snapshots.list(search_opts=search_opts) - for snapshot in snapshots: - ctx.logger.debug("Check snapshot after delete: {}:{} with state {}" - .format(snapshot.id, snapshot.name, snapshot.status)) - if search_opts.get('display_name'): - if snapshot.name == search_opts['display_name']: - return ctx.operation.retry( - message='{} is still alive'.format(snapshot.name), - retry_after=30) - - -def _delete_backup(cinder_client, search_opts): - backups = cinder_client.backups.list(search_opts=search_opts) - - if not backups: - return - - for backup in backups: - if search_opts.get(VOLUME_OPENSTACK_ID_KEY): - if backup.name != search_opts[VOLUME_OPENSTACK_ID_KEY]: - continue - ctx.logger.debug("Check backup before delete: {}:{} with state {}" - .format(backup.id, backup.name, backup.status)) - if backup.status == 'available': - backup.delete() - - # wait 10 seconds before next check - time.sleep(10) - - backups = cinder_client.backups.list(search_opts=search_opts) - - for backup in backups: - ctx.logger.debug("Check backup after delete: {}:{} with state {}" - .format(backup.id, backup.name, backup.status)) - if search_opts.get(VOLUME_OPENSTACK_ID_KEY): - if backup.name == search_opts[VOLUME_OPENSTACK_ID_KEY]: - return ctx.operation.retry( - message='{} is still 
alive'.format(backup.name), - retry_after=30) - - -@operation -@with_cinder_client -def delete(cinder_client, **kwargs): - # seach snapshots for volume - search_opts = { - 'volume_id': get_openstack_id(ctx), - } - _delete_snapshot(cinder_client, search_opts) - # remove volume itself - delete_resource_and_runtime_properties(ctx, cinder_client, - RUNTIME_PROPERTIES_KEYS) - - -@with_cinder_client -def wait_until_status(cinder_client, volume_id, status, num_tries, - timeout): - for _ in range(num_tries): - volume = cinder_client.volumes.get(volume_id) - - if volume.status in VOLUME_ERROR_STATUSES: - raise cfy_exc.NonRecoverableError( - "Volume {0} is in error state".format(volume_id)) - - if volume.status == status: - return volume, True - time.sleep(timeout) - - ctx.logger.warning("Volume {0} current state: '{1}', " - "expected state: '{2}'".format(volume_id, - volume.status, - status)) - return volume, False - - -@with_cinder_client -def get_attachment(cinder_client, volume_id, server_id): - volume = cinder_client.volumes.get(volume_id) - for attachment in volume.attachments: - if attachment['server_id'] == server_id: - return attachment - - -def _get_snapshot_name(ctx, kwargs): - return "vol-{}-{}".format(get_openstack_id(ctx), kwargs["snapshot_name"]) - - -@with_cinder_client -def snapshot_create(cinder_client, **kwargs): - volume_id = get_openstack_id(ctx) - - backup_name = _get_snapshot_name(ctx, kwargs) - - snapshot_incremental = kwargs["snapshot_incremental"] - if not snapshot_incremental: - ctx.logger.info("Backup create: {}".format(backup_name)) - cinder_client.backups.create(volume_id, name=backup_name) - else: - ctx.logger.info("Snapshot create: {}".format(backup_name)) - description = kwargs.get("snapshot_type", "") - cinder_client.volume_snapshots.create(volume_id, - force=True, - name=backup_name, - description=description, - metadata=None) - - -@with_cinder_client -def snapshot_apply(cinder_client, **kwargs): - volume_id = get_openstack_id(ctx) - - backup_name = _get_snapshot_name(ctx, kwargs) - snapshot_incremental = kwargs["snapshot_incremental"] - if not snapshot_incremental: - ctx.logger.info("Backup apply {} to {}".format(backup_name, volume_id)) - search_opts = { - 'volume_id': volume_id, - VOLUME_OPENSTACK_ID_KEY: backup_name - } - - backups = cinder_client.backups.list( - search_opts=search_opts) - - for backup in backups: - # if returned more than one backup, use first - if backup.name == backup_name: - ctx.logger.debug("Used first with {} to {}" - .format(backup.id, volume_id)) - cinder_client.restores.restore(backup.id, volume_id) - break - else: - raise cfy_exc.NonRecoverableError("No such {} backup." 
- .format(backup_name)) - else: - ctx.logger.error("Apply snapshot is unsuported") - - -@with_cinder_client -def snapshot_delete(cinder_client, **kwargs): - volume_id = get_openstack_id(ctx) - - backup_name = _get_snapshot_name(ctx, kwargs) - snapshot_incremental = kwargs["snapshot_incremental"] - if not snapshot_incremental: - ctx.logger.info("Backup for remove: {}".format(backup_name)) - # search snaphot for delete - search_opts = { - 'volume_id': volume_id, - VOLUME_OPENSTACK_ID_KEY: backup_name - } - _delete_backup(cinder_client, search_opts) - else: - ctx.logger.info("Snapshot for remove: {}".format(backup_name)) - # search snaphot for delete - search_opts = { - 'volume_id': volume_id, - 'display_name': backup_name - } - - _delete_snapshot(cinder_client, search_opts) - - -@operation -@with_cinder_client -def creation_validation(cinder_client, **kwargs): - validate_resource(ctx, cinder_client, VOLUME_OPENSTACK_TYPE, - VOLUME_OPENSTACK_ID_KEY) - - -@operation -@with_cinder_client -def list_volumes(cinder_client, args, **kwargs): - volume_list = cinder_client.volumes.list(**args) - add_list_to_runtime_properties(ctx, VOLUME_OPENSTACK_TYPE, volume_list) diff --git a/constraints.txt b/constraints.txt deleted file mode 100644 index 5b55594b..00000000 --- a/constraints.txt +++ /dev/null @@ -1,2 +0,0 @@ -cloudify-plugins-common==3.4.2 -PyYAML==3.12 diff --git a/dev-requirements.txt b/dev-requirements.txt index fcb6a806..97fd5799 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -1,3 +1 @@ -https://github.com/cloudify-cosmo/cloudify-dsl-parser/archive/3.4.1.zip -https://github.com/cloudify-cosmo/cloudify-rest-client/archive/3.4.1.zip -https://github.com/cloudify-cosmo/cloudify-plugins-common/archive/3.4.1.zip +cloudify-common==4.5.5 \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 1bff5a11..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,177 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
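Stepping back to the _delete_snapshot and _delete_backup helpers removed earlier in this hunk: both implement the same delete, settle, re-check, retry shape, differing only in which Cinder listing they query and which search_opts key carries the name. A condensed, hedged sketch of that shared pattern (parameter names are illustrative):

```python
import time


def delete_matching_and_verify(ctx, list_resources, search_opts,
                               wanted_name=None, settle=10, retry_after=30):
    """Shared shape of _delete_snapshot/_delete_backup; illustrative only.

    list_resources is e.g. cinder_client.volume_snapshots.list or
    cinder_client.backups.list, both of which accept search_opts.
    """
    for res in list_resources(search_opts=search_opts):
        if wanted_name and res.name != wanted_name:
            continue
        if res.status == 'available':
            res.delete()
    # Give Cinder a moment to process the deletes, as the originals do.
    time.sleep(settle)
    if not wanted_name:
        return
    for res in list_resources(search_opts=search_opts):
        if res.name == wanted_name:
            # Still visible: ask Cloudify to re-run this operation later.
            return ctx.operation.retry(
                message='{0} is still alive'.format(res.name),
                retry_after=retry_after)
```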
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cloudify-openstack-plugin.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cloudify-openstack-plugin.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/cloudify-openstack-plugin" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cloudify-cli" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
- @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/changelog.rst b/docs/changelog.rst deleted file mode 100644 index a5192b49..00000000 --- a/docs/changelog.rst +++ /dev/null @@ -1,7 +0,0 @@ - - -Changelog -========= - -.. include:: ../CHANGELOG.txt - diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 3a829451..00000000 --- a/docs/conf.py +++ /dev/null @@ -1,301 +0,0 @@ -# -*- coding: utf-8 -*- -# -# cloudify-openstack-plugin documentation build configuration file, created by -# sphinx-quickstart on Tue Nov 8 14:02:23 2016. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. 
-#sys.path.insert(0, os.path.abspath('.')) - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.viewcode', - 'sphinxify', -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'cloudify-openstack-plugin' -copyright = u'2016-17 GigaSpaces Technologies Ltd.' -author = u'GigaSpaces Technologies Ltd.' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'2.0' -# The full version, including alpha/beta/rc tags. -release = u'2.0' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# html_theme = 'sphinx-rtd-theme' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. 
-#html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -#html_title = u'cloudify-openstack-plugin v1.0a1' - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. -#html_last_updated_fmt = None - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -#html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -#html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -#html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cloudify-openstack-plugindoc' - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. 
-#'preamble': '', - -# Latex figure (float) alignment -#'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'cloudify-openstack-plugin.tex', u'cloudify-openstack-plugin Documentation', - u'GigaSpaces Technologies Ltd.', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cloudify-openstack-plugin', u'cloudify-openstack-plugin Documentation', - [author], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'cloudify-openstack-plugin', u'cloudify-openstack-plugin Documentation', - author, 'cloudify-openstack-plugin', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/2/': None} - - -# SCVersioning -scv_show_banner = True -scv_banner_greatest_tag = True diff --git a/docs/configuration.rst b/docs/configuration.rst deleted file mode 100644 index 2dfb8038..00000000 --- a/docs/configuration.rst +++ /dev/null @@ -1,82 +0,0 @@ -.. _config: - -Openstack Configuration -======================= - -The Openstack plugin requires credentials and endpoint setup information in order to authenticate and interact with Openstack. - -This information will be gathered by the plugin from the following sources, -each source possibly partially or completely overriding values gathered from previous ones: - -1. environment variables for each of the configuration parameters. -2. JSON file at ``~/openstack_config.json`` or at a path specified by the value of an environment variable named ``OPENSTACK_CONFIG_PATH`` -3. values specified in the ``openstack_config`` property for the node whose operation is currently getting executed (in the case of relationship operations, the ``openstack_config`` property of either the **source** or **target** nodes will be used if available, with the **source**'s one taking precedence). 
- -The structure of the JSON file in section (2), as well as of the ``openstack_config`` property in section (3), is as follows: - -.. highlight:: json - -:: - - { - "username": "", - "password": "", - "tenant_name": "", - "auth_url": "", - "region": "", - "nova_url": "", - "neutron_url": "", - "custom_configuration": "" - } - -* ``username`` username for authentication with Openstack Keystone service. -* ``password`` password for authentication with Openstack Keystone service. -* ``tenant_name`` name of the tenant to be used. -* ``auth_url`` URL of the Openstack Keystone service. - - .. attention:: New in 2.0 - - ``auth_url`` must include the full keystone auth URL, including the version number. - -* ``region`` Openstack region to be used. This may be optional when there's but a single region. -* ``nova_url`` (**DEPRECATED** - instead, use ``custom_configuration`` to pass ``endpoint_override`` directly to the Nova client) explicit URL for the Openstack Nova service. This may be used to override the URL for the Nova service that is listed in the Keystone service. -* ``neutron_url`` (**DEPRECATED** - instead, use ``custom_configuration`` to pass ``endpoint_url`` directly to the Neutron client) explicit URL for the Openstack Neutron service. This may be used to override the URL for the Neutron service that is listed in the Keystone service. -* ``custom_configuration`` a dictionary which allows overriding or directly passing custom configuration parameter to each of the Openstack clients, by using any of the relevant keys: ``keystone_client``, ``nova_client``, ``neutron_client`` or ``cinder_client``. - * Parameters passed directly to Openstack clients using the ``custom_configuration`` mechanism will override other definitions (e.g. any of the common Openstack configuration parameters listed above, such as ``username`` and ``tenant_name``) - * The following is an example for the usage of the ``custom_configuration`` section in a blueprint: - -.. highlight:: yaml - -:: - - custom_configuration: - nova_client: - endpoint_override: nova-endpoint-url - nova_specific_key_1: value_1 - nova_specific_key_2: value_2 - neutron_client: - endpoint_url: neutron-endpoint-url - keystone_client: - .. - cinder_client: - .. - - -The environment variables mentioned in (1) are the standard Openstack environment variables equivalent to the ones in the JSON file or ``openstack_config`` property. In their respective order, they are: - -* ``OS_USERNAME`` -* ``OS_PASSWORD`` -* ``OS_TENANT_NAME`` -* ``OS_AUTH_URL`` -* ``OS_REGION_NAME`` -* ``NOVACLIENT_BYPASS_URL`` -* ``OS_URL`` - -**Note**: ``custom_configuration`` doesn't have an equivalent standard Openstack environment variable. - - - The Openstack manager blueprint stores the Openstack configuration used for the bootstrap process in a JSON file as described in (2) at - ``~/openstack-config.json``. - Therefore, if they've been used for bootstrap, - the Openstack configuration for applications isn't required as the plugin will default to these same settings. - diff --git a/docs/examples.rst b/docs/examples.rst deleted file mode 100644 index 4f367434..00000000 --- a/docs/examples.rst +++ /dev/null @@ -1,338 +0,0 @@ - -.. highlight:: yaml - -Examples -======== - -Example I ---------- - -This example will show how to use most of the types in this plugin, -as well as how to make the relationships between them. - -We'll see how to create a server with a security group set on it and a floating_ip associated to it, -on a subnet in a network. 
- - -The following is an excerpt from the blueprint's `blueprint`.`nodes` section:: - - my_floating_ip: - type: cloudify.openstack.nodes.FloatingIP - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - floating_network_name: Ext-Net - - - my_network: - type: cloudify.openstack.nodes.Network - properties: - resource_id: my_network_openstack_name - - - my_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - resource_id: my_subnet_openstack_name - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - cidr: 1.2.3.0/24 - ip_version: 4 - cloudify.interfaces.validation: - creation: - inputs: - args: - cidr: 1.2.3.0/24 - ip_version: 4 - relationships: - - target: my_network - type: cloudify.relationships.contained_in - - - my_security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - resource_id: my_security_group_openstack_name - rules: - - remote_ip_prefix: 0.0.0.0/0 - port: 8080 - - - my_server: - type: cloudify.openstack.nodes.Server - properties: - resource_id: my_server_openstack_name - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - cloudify.interfaces.validation: - creation: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - relationships: - - target: my_network - type: cloudify.relationships.connected_to - - target: my_subnet - type: cloudify.relationships.depends_on - - target: my_floating_ip - type: cloudify.openstack.server_connected_to_floating_ip - - target: my_security_group - type: cloudify.openstack.server_connected_to_security_group - - -1. Creates a floating IP, whose node name is ``my_floating_ip``, and whose floating_network_name is ``Ext-Net`` (This value represents the name of the external network). -2. Creates a network, whose node name is ``my_network``, and whose name on Openstack is ``my_network_openstack_name``. -3. Creates a subnet, whose node name is ``my_subnet``, and whose name on Openstack is ``my_subnet_openstack_name``. The subnet's address range is defined to be 1.2.3.0 - 1.2.3.255 using the ``cidr`` parameter, and the subnet's IP version is set to version 4. The subnet will be set on the ``my_network_openstack_name`` network because of the relationship to the ``my_network`` node. -4. Creates a security_group, whose node name is ``my_security_group``, and whose name on Openstack is ``my_security_group_openstack_Name``. The security group is set with a single rule, which allows all traffic (since we use the address range ``0.0.0.0/0``) to port ``8080`` (default direction is *ingress*). -5. Creates a server, whose node name is ``my_server``, and whose name on openstack is ``my_server_openstack_name``. The server is set with an image and flavor IDs. The server is set with multiple relationships: - - - A relationship to the ``my_network`` node: Through this relationship, - the server will be automatically placed on the ``my_network_openstack_name`` network. - - A relationship to the ``my_subnet`` node: - This relationship is strictly for ensuring the order of creation is correct, - as the server requires the ``my_subnet_openstack_name`` subnet to exist before it can be created on it. - - A relationship to the ``my_floating_ip`` node: - This designated relationship type will take care of associating the server with the floating IP represented by the ``my_floating_ip`` node. 
- - A relationship with the ``my_security_group`` node: - This relationship will take care of setting the server up with the security group represented by the ``my_security_group`` node. - - -Example II ----------- - -This example will show how to use the ``router`` and ``port`` types, as well as some of the relationships that were missing from Example I. - -We'll see how to create a server connected to a port, where the port is set on a subnet in a network, and has a security group set on it. Finally, we'll see how this subnet connects to a router and from there to the external network. - - -The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: - - my_network: - type: cloudify.openstack.nodes.Network - properties: - resource_id: my_network_openstack_name - - - my_security_group: - type: cloudify.openstack.nodes.SecurityGroup - properties: - resource_id: my_security_group_openstack_name - rules: - - remote_ip_prefix: 0.0.0.0/0 - port: 8080 - - - my_subnet: - type: cloudify.openstack.nodes.Subnet - properties: - resource_id: my_subnet_openstack_name - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - cidr: 1.2.3.0/24 - ip_version: 4 - cloudify.interfaces.validation: - creation: - inputs: - args: - cidr: 1.2.3.0/24 - ip_version: 4 - relationships: - - target: my_network - type: cloudify.relationships.contained_in - - target: my_router - type: cloudify.openstack.subnet_connected_to_router - - - my_port: - type: cloudify.openstack.nodes.Port - properties: - resource_id: my_port_openstack_name - relationships: - - target: my_network - type: cloudify.relationships.contained_in - - target: my_subnet - type: cloudify.relationships.depends_on - - target: my_security_group - type: cloudify.openstack.port_connected_to_security_group - - - my_router: - type: cloudify.openstack.nodes.Router - properties: - resource_id: my_router_openstack_Name - - - my_server: - type: cloudify.openstack.nodes.Server - properties: - cloudify_agent: - user: ubuntu - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - cloudify.interfaces.validation: - creation: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - relationships: - - target: my_port - type: cloudify.openstack.server_connected_to_port - - -1. Creates a network. See Example I for more information. - -2. Creates a security group. See Example I for more information. - -3. Creates a subnet. This is again similar to what we've done in Example I. The difference here is that the subnet has an extra relationship set towards a router. - -4. Creates a port, whose node name is ``my_port``, and whose name on Openstack is ``my_port_openstack_name``. The port is set with multiple relationships: - - - A relationship to the ``my_network`` node: Through this relationship, the port will be automatically placed on the ``my_network_openstack_name`` network. - - A relationship to the ``my_subnet`` node: This relationship is strictly for ensuring the order of creation is correct, as the port requires the ``my_subnet_openstack_name`` subnet to exist before it can be created on it. - - A relationship to the ``my_security_group`` node: This designated relationship type will take care of setting the ``my_security_group_openstack_name`` security group on the port. - -5. Creates a router, whose node name is ``my_router``, and whose name on Openstack is ``my_router_openstack_name``. 
The router will automatically have an interface in the external network. - -6. Creates a server, whose node name is ``my_server``, and whose name on Openstack is **the node's ID** (since no ``name`` parameter was supplied under the ``server`` property). The server is set with an image and flavor IDs. It also overrides the ``cloudify_agent`` property of its parent type to set the username that will be used to connect to the server for installing the Cloudify agent on it. Finally, it is set with a relationship to the ``my_port`` node: This designated relationship type will take care of connecting the server to ``my_port_openstack_name``. - - -Example III ------------ - -This example will show how to use the ``volume`` type, as well as ``volume_attached_to_server`` relationship. - -The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: - - my_server: - type: cloudify.openstack.nodes.Server - properties: - cloudify_agent: - user: ubuntu - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - cloudify.interfaces.validation: - creation: - inputs: - args: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - - my_volume: - type: cloudify.openstack.nodes.Volume - properties: - resource_id: my_openstack_volume_name - device_name: /dev/vdb - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - size: 1 - relationships: - - target: my_server - type: cloudify.openstack.volume_attached_to_server - - -1. Creates a server, with name ``my_server``, and with name on Openstack **the node's ID** (since no ``name`` parameter was supplied under the ``server`` property). The server is set with an image and flavor IDs. -2. Creates a volume. It is set with a relationship to the ``my_server`` node: This designated relationship type will take care of attaching the volume to Openstack server node. - - - -Example IV ----------- - -This example will show how to use a Windows server with a Cloudify agent on it. 
- - -The following is an excerpt from the blueprint's ``blueprint``.``node_templates`` section:: - - my_keypair: - type: cloudify.openstack.nodes.KeyPair - properties: - private_key_path: /tmp/windows-test.pem - - my_server: - type: cloudify.openstack.nodes.WindowsServer - relationships: - - type: cloudify.openstack.server_connected_to_keypair - target: keypair - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - server: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - name: my-server - userdata: | - #ps1_sysnative - winrm quickconfig -q - winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}' - winrm set winrm/config '@{MaxTimeoutms="1800000"}' - winrm set winrm/config/service '@{AllowUnencrypted="true"}' - winrm set winrm/config/service/auth '@{Basic="true"}' - &netsh advfirewall firewall add rule name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow - &netsh advfirewall firewall add rule name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow - - msiexec /i https://www.python.org/ftp/python/2.7.6/python-2.7.6.msi TARGETDIR=C:\Python27 ALLUSERS=1 /qn - cloudify.interfaces.validation: - creation: - inputs: - args: - server: - image: 8672f4c6-e33d-46f5-b6d8-ebbeba12fa02 - flavor: 101 - name: my-server - userdata: | - #ps1_sysnative - winrm quickconfig -q - winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}' - winrm set winrm/config '@{MaxTimeoutms="1800000"}' - winrm set winrm/config/service '@{AllowUnencrypted="true"}' - winrm set winrm/config/service/auth '@{Basic="true"}' - &netsh advfirewall firewall add rule name="WinRM 5985" protocol=TCP dir=in localport=5985 action=allow - &netsh advfirewall firewall add rule name="WinRM 5986" protocol=TCP dir=in localport=5986 action=allow - - msiexec /i https://www.python.org/ftp/python/2.7.6/python-2.7.6.msi TARGETDIR=C:\Python27 ALLUSERS=1 /qn - cloudify.interfaces.worker_installer: - install: - inputs: - cloudify_agent: - user: Admin - password: { get_attribute: [SELF, password] } - - -1. Creates a keypair. the private key will be saved under ``/tmp/windows-test.pem``. -2. Creates a Windows server: - - * It is set with a relationship to the ``my_keypair`` node, which will make the server use the it as a public key for authentication, and also use this public key to encrypt its password before posting it to the Openstack metadata service. - * The worker-installer interface operations are given values for the user and password for the ``cloudify_agent`` input - the password uses the [get_attribute]({{< relref "blueprints/spec-intrinsic-functions.md#get-attribute" >}}) feature to retrieve the decrypted password from the Server's runtime properties (Note that in this example, only the ``install`` operation was given with this input, but all of the worker installer operations as well as the plugin installer operations should be given with it). - * We define custom userdata which configures WinRM and installs Python on the machine (Windows Server 2012 in this example) once it's up. This is required for the Cloudify agent to be installed on the machine. - - diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 9d435295..00000000 --- a/docs/index.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. cloudify-cli documentation master file, created by - sphinx-quickstart on Thu Jun 12 15:30:03 2014. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. 
- -Cloudify Openstack Plugin -========================= - -The OpenStack plugin allows users to use an OpenStack based cloud infrastructure for deploying services and applications. -For more information about OpenStack, please refer to: https://www.openstack.org/. - - -Contents: - -.. toctree:: - :maxdepth: 2 - - configuration - types - nova-net - examples - misc - changelog - - -Plugin Requirements -------------------- - -* Python versions: - - * 2.7.x -* If the plugin is installed from source, - then the following system dependencies are required: - - * ``gcc`` - * ``gcc-c++`` - * ``python-devel`` - - -Compatibility -------------- - -* *Mitaka* official support -* *Liberty* official support -* *Kilo* official support -* *Juno*, *Icehouse* previously supported, not currently tested. - -The Openstack plugin uses various Openstack clients packages. The versions used in Openstack Plugin are as follows: - -* `keystoneauth1 `_ - 2.12.1 -* `Keystone client `_ - 3.5.0 -* `Nova client `_ - 7.0.0 -* `Neutron client `_ - 6.0.0 -* `Cinder client `_ - 1.9.0 -* `Glance client `_ - 2.5.0 - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/docs/misc.rst b/docs/misc.rst deleted file mode 100644 index 7ba5c849..00000000 --- a/docs/misc.rst +++ /dev/null @@ -1,121 +0,0 @@ - -.. highlight:: yaml - -Tips -==== - -* It is highly recommended to **ensure that Openstack names are unique** (for a given type): While Openstack allows for same name objects, having identical names for objects of the same type might lead to ambiguities and errors. - -* To set up DNS servers for Openstack servers (whether it's the Cloudify Manager or application VMs), one may use the Openstack ``dns_nameservers`` parameter for the [Subnet type](#cloudifyopenstacknodessubnet) - that is, pass the parameter directly to Neutron by using the ``args`` input of the operations in Subnet node, e.g.:: - - my_subnet_node: - interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - dns_nameservers: [1.2.3.4] - cloudify.interfaces.validation: - creation: - inputs: - args: - dns_nameservers: [1.2.3.4] - - This will set up ``1.2.3.4`` as the DNS server for all servers on this subnet. - -* Public keys, unlike the rest of the Openstack resources, are user-based rather than tenant-based. When errors indicate a missing keypair, make sure you're using the correct user rather than tenant. - -* ICMP rules show up on Horizon (Openstack GUI) as ones defined using ``type`` and ``code`` fields, rather than a port range. However, in the actual Neutron (and Nova, in case of Nova-net security groups) service, these fields are represented using the standard port range fields (i.e., ``type`` and ``code`` correspond to ``port_range_min`` and ``port_range_max`` (respectively) on Neutron security groups, and to ``from_port`` and ``to_port`` (respectively) on Nova-net security groups). - - ** For example, to set a security group rule which allows **ping** from anywhere, the following setting may be declared in the blueprint: - * ``protocol``: ``icmp`` - * ``port_range_min``: ``0`` (type) - * ``port_range_max``: ``0`` (code) - * ``remote_ip_prefix``: ``0.0.0.0/0`` - -* To use Openstack Neutron's ML2 extensions, use the ``args`` input for the Network's ``create`` operation. For example, the `provider network `_ may be set in the following way:: - - my_network: - type: cloudify.openstack.nodes.Network - ... 
- interfaces: - cloudify.interfaces.lifecycle: - create: - inputs: - args: - # Note that for this parameter to work, OpenStack must be configured to use Neutron's ML2 extensions - provider:network_type: vxlan - -* Ordering NICs in the Openstack plugin can be done in the 1.4 version of the Openstack plugin by simply stating the relationships to the various networks (or ports) in the desired order, e.g.:: - - node_templates: - server: - type: cloudify.openstack.nodes.Server - relationships: - - target: network1 - type: cloudify.relationships.connected_to - - target: network2 - type: cloudify.relationships.connected_to - - network1: - type: cloudify.openstack.nodes.Network - properties: - resource_id: network1 - - network2: - type: cloudify.openstack.nodes.Network - properties: - resource_id: network2 - - In the example above, network1 will be connected to a NIC preceding the one network2 will - however these wont be eth0/eth1, but rather eth1/eth2 - because by default, the management network will be prepended to the networks list (i.e. it'll be assigned to eth0). - To avoid this prepending, one should explicitly declare a relationship to the management network, where the network's represented in the blueprint by an existing resource (using the "use_external_resource" property). - This will cause the management network adhere the NICs ordering as the rest of them. - Example:: - - node_templates: - server: - type: cloudify.openstack.nodes.Server - properties: - management_network_name: network2 - relationships: - - target: network1 - type: cloudify.relationships.connected_to - - target: network2 - type: cloudify.relationships.connected_to - - target: network3 - type: cloudify.relationships.connected_to - - network1: - type: cloudify.openstack.nodes.Network - properties: - resource_id: network1 - - network2: - type: cloudify.openstack.nodes.Network - properties: - use_external_resource: true - resource_id: network2 - - network3: - type: cloudify.openstack.nodes.Network - properties: - use_external_resource: true - resource_id: network3 - - In this example, "network2" represents the management network, yet it'll be connected to eth1, while "network1" will take eth0, and "network3" (which also happened to already exist) will get connected to eth2. - - The server's property "management_network_name: network2" is not mandatory for this to work - this was just to make the example clear - yet the management network can also be inferred from the provider context (which is what happens when this property isn't explicitly set). Were the provider context to have "network2" set as the management network, this example would've worked just the same with this property omitted. - -Misc -==== - -* The plugin's operations are each **transactional** - (and therefore also retryable on failures), - yet not **idempotent**. - Attempting to execute the same operation twice is likely to fail. - -* Over this documentation, it's been mentioned multiple times that some configuration-saving information may be available in the Provider Context. - The Openstack manager blueprint and Openstack provider both create this relevant information, - and therefore if either was used for bootstrapping, the Provider Context will be available for the Openstack plugin to use. - -The exact details of the structure of the Openstack Provider Context are not documented since this feature is going through deprecation and will be replaced with a more advanced one. 
diff --git a/docs/nova-net.rst b/docs/nova-net.rst deleted file mode 100644 index dccf360c..00000000 --- a/docs/nova-net.rst +++ /dev/null @@ -1,48 +0,0 @@ - -Nova-net Support -================ - -The Openstack plugin includes support for Nova-net mode - -i.e. an Openstack installation which does not have the Networking API -(Neutron service). - -In such an environment, there is but a single preconfigured private network, -which all servers make use of automatically. -There are no subnets, networks, routers or ports. -Since these resource types don't exist, -the plugin's equivalent types aren't valid to use in such an environment. - -There are, however, some resource types whose API is available via both the Nova and Neutron services - These had originally been on the Nova service, -and later were moved and got extended implementation in the Neutron one, -but were also kept in the Nova service for backward compatibility. - -For these resource types, the Openstack plugin defines two separate types - one in the plugin's standard types namespace (``cloudify.openstack.nodes.XXX``), -which uses the newer and extended API via the Neutron service; -and Another in a special namespace (``cloudify.openstack.nova_net.nodes.XXX``), -which uses the older API via the Nova service. -This is why you may notice two separate types defined for [Floating](#cloudifyopenstacknodesfloatingip) [IP](#cloudifyopenstacknovanetnodesfloatingip), -as well as for [Security](#cloudifyopenstacknodessecuritygroup) [Group](#cloudifyopenstacknovanetnodessecuritygroup). - - -To summarize, ensure that when working in a Nova-net Openstack environment, -Neutron types aren't used - these include all types whose resources' APIs are natively available only via the Network API, -as well as the types which are in the ``cloudify.openstack.nova_net.Nodes`` namespace. - -On the opposite side, when using an Openstack environment which supports Neutron, -it's recommended to use the Neutron-versions of the relevant types -(i.e. avoid any types defined under the -``cloudify.openstack.nova_net.Nodes`` namespace), -as they offer more advanced capabilities. -However, it's important to mention that this is not required, -and using the Nova-versions of some types in a Neutron-enabled environment is possible and will work as well. - - -Nova-net Node Types -------------------- - - -.. cfy:node:: cloudify.openstack.nova_net.nodes.FloatingIP - - -.. cfy:node:: cloudify.openstack.nova_net.nodes.SecurityGroup - diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 07de519b..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -git+https://github.com/cloudify-cosmo/sphinxify.git diff --git a/docs/types.rst b/docs/types.rst deleted file mode 100644 index c5a20402..00000000 --- a/docs/types.rst +++ /dev/null @@ -1,199 +0,0 @@ - -.. highlight:: yaml - -Types -^^^^^ - -Node Types -========== - -.. cfy:node:: cloudify.openstack.nodes.Server - - An OpenStack server. - - -.. cfy:node:: cloudify.openstack.nodes.WindowsServer - - This type has the same properties and operations-mapping as the type above (as it derives from it), yet it overrides some of the agent and plugin installations operations-mapping derived from the built-in cloudify.nodes.Compute type. Use this type when working with a Windows server. - - Additionally, the default value for the use_password property is overridden for this type, and is set to true. When using an image with a preset password, it should be modified to false. - - -.. 
cfy:node:: cloudify.openstack.nodes.KeyPair - - -.. cfy:node:: cloudify.openstack.nodes.Image - - -.. cfy:node:: cloudify.openstack.nodes.SecurityGroup - - -.. cfy:node:: cloudify.openstack.nodes.Router - - -.. cfy:node:: cloudify.openstack.nodes.Port - - -.. cfy:node:: cloudify.openstack.nodes.Network - - -.. cfy:node:: cloudify.openstack.nodes.Subnet - - -.. cfy:node:: cloudify.openstack.nodes.FloatingIP - - -.. cfy:node:: cloudify.openstack.nodes.Volume - - -.. cfy:node:: cloudify.openstack.nodes.Project - - -.. cfy:node:: cloudify.openstack.nodes.User - - -.. cfy:node:: cloudify.openstack.nodes.ServerGroup - -.. cfy:node:: cloudify.openstack.nodes.Flavor - - -.. cfy:node:: cloudify.openstack.nodes.HostAggregate - - -Types' Common Behaviors -======================= - -Validations ------------ - -All types offer the same base functionality for the ``cloudify.interfaces.validation.creation`` interface operation: - - * If it's a new resource (``use_external_resource`` is set to ``false``), the basic validation is to verify there's enough quota to allocate a new resource of the given type. - - * When [using an existing resource](#using-existing-resources), the validation ensures the resource indeed exists. - - -Runtime Properties ------------------- - -Node instances of any of the types defined in this plugin get set with the following runtime properties during the ``cloudify.interfaces.lifecycle.create`` operation: - - * ``external_id`` the Openstack ID of the resource - * ``external_type`` the Openstack type of the resource - * ``external_name`` the Openstack name of the resource - -The only exceptions are the two *floating-ip* types - Since floating-ip objects on Openstack don't have a name, the ``external_name`` runtime property is replaced with the ``floating_ip_address`` one, which holds the object's actual IP address. - - -Default Resource Naming Convention ----------------------------------- - -When creating a new resource (i.e. ``use_external_resource`` is set to ``false``), its name on Openstack will be the value of its ``resource_id`` property. However, if this value is not provided, the name will default to the following schema: - -``__`` - -For example, if a server node is defined as so:: - - node_templates: - myserver: - type: cloudify.openstack.nodes.Server - ... - -Yet without setting the ``resource_id`` property, then the server's name on Openstack will be ``server_my-deployment_myserver_XXXXX`` (where the XXXXX is the autogenerated part of the node instance's ID). - - - -Using Existing Resources ------------------------- - -It is possible to use existing resources on Openstack - whether these have been created by a different Cloudify deployment or not via Cloudify at all. - -All Cloudify Openstack types have a property named ``use_external_resource``, whose default value is ``false``. When set to ``true``, the plugin will apply different semantics for each of the operations executed on the relevant node's instances. Specifically, in the case of the ``cloudify.interfaces.lifecycle.create`` operation, rather than creating a new resource on Openstack of the given type, the plugin will behave as follows: - -1. Try to find an existing resource on Openstack whose name (or IP, in the case of one of the **floating-ip** types) is the value specified for the ``resource_id`` property. If more than one is found, an error is raised. - -2. If no resource was found, the plugin will use the value of the ``resource_id`` property to look for the resource by ID instead. 
If a resource still isn't found, an error is raised. - -3. If a single resource was found, the plugin will use that resource, and set the node instance with the appropriate runtime properties according to the resource's data. - - -The semantics of other operations are affected as well: - -* The ``cloudify.interfaces.lifecycle.start`` operation, where applicable, will only validate that the resource is indeed started, raising an error if it isn't. - -* The ``cloudify.interfaces.lifecycle.stop`` operation, where applicable, won't have any effect. - -* The ``cloudify.interfaces.lifecycle.delete`` operation will not actually delete the resource from Openstack (but will clear the runtime properties from the node instance). - -* The ``cloudify.interfaces.validation.creation`` operation will verify that a resource with the given name or ID indeed exists, or otherwise print a list of all available resources of the given type. - -* The ``cloudify.interfaces.relationship_lifecycle.establish`` operation will behave as normal if the related node is not set with ``use_external_resource`` as ``true``; However if both nodes have this property set to ``true``, the operation will only attempt to verify that they're indeed "connected" on Openstack as well ("connected" in this case also refers to a security-group imposed on a server, floating-ip associated with a server, etc.). - - -Notes ------ - -* As mentioned in the [Relationships section](#relationships), some relationships take effect in non-relationship operations. When ``use_external_resource`` is set to ``true``, the existence of such connections is validated as well. - -* Using an existing resource only makes sense for single-instance nodes. - - - - -Relationships -============= - - Not all relationships have built-in types - (i.e., some types may simply get connected using standard Cloudify relationships such as ``cloudify.relationships.connected_to``). - - Some relationships take effect in non-relationship operations, - e.g. a subnet which is connected to a network actually gets connected on subnet's creation - (in the ``cloudify.interfaces.lifecycle.create`` operation) - and not in a ``cloudify.interfaces.relationship_lifecycle.establish`` operation - this occurs whenever the connection information is required on resource creation. - - -.. cfy:rel:: cloudify.openstack.server_connected_to_port - - A relationship for connecting a server to a port. The server will use this relationship to automatically connect to the port upon server creation. - - -.. cfy:rel:: cloudify.openstack.port_connected_to_security_group - - A relationship for a port to a security group. - - -.. cfy:rel:: cloudify.openstack.server_connected_to_keypair - - -.. cfy:rel:: cloudify.openstack.port_connected_to_subnet - - A relationship for connecting a port to a subnet. This is useful when a network has multiple subnets, and a port should belong to a specific subnet on that network. The port will then receive some IP from that given subnet. - - Note that when using this relationship in combination with the port type's property `fixed_ip`, the IP given should be on the CIDR of the subnet connected to the port. - - *Note*: This relationship has no operations associated with it; The port will use this relationship to automatically connect to the subnet upon port creation. - - -.. cfy:rel:: cloudify.openstack.server_connected_to_security_group - - A relationship for setting a security group on a server. - - -.. 
cfy:rel:: cloudify.openstack.subnet_connected_to_router - - A relationship for connecting a subnet to a router. - - -.. cfy:rel:: cloudify.openstack.port_connected_to_floating_ip - - A relationship for associating a floating ip with a port. If that port is later connected to a server, the server will be accessible via the floating IP. - - -.. cfy:rel:: cloudify.openstack.server_connected_to_floating_ip - - A relationship for associating a floating ip with a server. - - -.. cfy:rel:: cloudify.openstack.volume_attached_to_server - - A relationship for attaching a volume to a server. diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..795fa251 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,7 @@ +# Cloudify openstack Plugin Examples + +These blueprints are used primarily for testing use cases. + +The local blueprints are using for testing in a special environment. They have several syntactical peculiarities and are not therefore representative of best practices. + +The manager blueprints are good for learning best practices. diff --git a/examples/cloudify-hello-world-example/inputs.yaml b/examples/cloudify-hello-world-example/inputs.yaml new file mode 100644 index 00000000..f0e1b47f --- /dev/null +++ b/examples/cloudify-hello-world-example/inputs.yaml @@ -0,0 +1,13 @@ +# Auth Config +auth_url: https://rackspace-api-test.co:5000/v2.0 +username: XXXXXX +password: XXXXXX +project_name: XXXXXX +region_name: RegionOne + +# Network Config +external_network_id: dda079ce-12cf-4309-879a-8e67aec94de4 + +# Flavor/Image Config +image: e41430f7-9131-495b-927f-e7dc4b8994c8 +flavor: 2 diff --git a/examples/cloudify-hello-world-example/install-script.yaml b/examples/cloudify-hello-world-example/install-script.yaml new file mode 100644 index 00000000..f1a09a0d --- /dev/null +++ b/examples/cloudify-hello-world-example/install-script.yaml @@ -0,0 +1,54 @@ +inputs: + + install_script: + type: string + default: | + #!/bin/bash + apt-get update + apt-get install apache2 apache2-doc apache2-utils -y + service apache2 restart + a2dissite 000-default.conf + mkdir -p /var/www/example.com/{logs,public_html} + touch /var/www/example.com/logs/access.log /var/www/example.com/logs/error.log + cat < /var/www/example.com/public_html/index.html + +
+        <html>
+        <head>
+        <title>Cloudify Hello World</title>
+        </head>
+        <body>
+        <h1>Hello, World!</h1>
+        </body>
+        </html>
+ + + + EOF + curl -o /var/www/example.com/public_html/image.png https://cloudify.co/wp-content/uploads/2018/01/cloudify_logo-wh-trans.png + cat < /etc/apache2/sites-available/example.com.conf + + ServerAdmin webmaster@example.com + ServerName example.com + ServerAlias www.example.com + DocumentRoot /var/www/example.com/public_html/ + ErrorLog /var/www/example.com/logs/error.log + CustomLog /var/www/example.com/logs/access.log combined + + EOF + a2ensite example.com.conf + service apache2 restart \ No newline at end of file diff --git a/examples/cloudify-hello-world-example/openstack.yaml b/examples/cloudify-hello-world-example/openstack.yaml new file mode 100644 index 00000000..e6e1b339 --- /dev/null +++ b/examples/cloudify-hello-world-example/openstack.yaml @@ -0,0 +1,238 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + - install-script.yaml + +inputs: + + auth_url: + type: string + default: { get_secret: keystone_url } + + username: + type: string + default: { get_secret: keystone_username } + + password: + type: string + default: { get_secret: keystone_password } + + project_name: + type: string + default: { get_secret: keystone_tenant_name } + + region_name: + type: string + + external_network_id: + type: string + description: A network that provisions floating IPs. + + image: + type: string + description: The UUID of an Ubuntu Trusty 14.04 image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + network_name: + type: string + description: A name for your new Openstack Network. + default: cloudify-hello-world-network + + subnet_name: + type: string + description: A name for your new Openstack Subnet. + default: cloudify-hello-world-subnet + + name_prefix: + type: string + default: hellow_world + + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + project_name: { get_input: project_name } + region_name: { get_input: region_name } + +node_templates: + + external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + vm: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, '_server' ] } + user_data: { get_input: install_script } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: port + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, '_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: network + - type: cloudify.relationships.depends_on + target: subnet + - type: cloudify.relationships.connected_to + target: security_group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: ip + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { get_input: subnet_name } + ip_version: 4 + cidr: 10.10.4.0/24 + dns_nameservers: [8.8.4.4, 8.8.8.8] + allocation_pools: + - start: 10.10.4.2 + end: 10.10.4.254 + relationships: + - type: cloudify.relationships.contained_in + target: network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { get_input: network_name } + + router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, '_router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + security_group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + + resource_config: + name: { concat: [ { get_input: name_prefix }, 'agent_sg' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: external-network + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + +outputs: + + application_endpoint: + description: The external endpoint of the application. + value: { concat: [ 'http://', { get_attribute: [ ip, floating_ip_address ] }, ':80' ] } diff --git a/examples/local/blueprint.yaml b/examples/local/blueprint.yaml new file mode 100644 index 00000000..f2fd64ed --- /dev/null +++ b/examples/local/blueprint.yaml @@ -0,0 +1,199 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + external_network_id: + type: string + + example_subnet_cidr: + type: string + default: 10.0.0.0/16 + + image_id: + type: string + + flavor_id: + type: string + + name_prefix: + type: string + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
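+ # (Unlike the hello-world blueprint, which lists security_group_rules inline on the
+ # SecurityGroup node, this blueprint models a single SSH rule as the separate
+ # example-security-group-rule node below and attaches it with contained_in.)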
+ + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-server-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server-port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-floating-ip-address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + - type: cloudify.relationships.connected_to + target: example-server-port + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image_id } + flavor_id: { get_input: flavor_id } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-server-port + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/boot_volumes_1.yaml b/examples/local/boot_volumes_1.yaml new file mode 100644 index 00000000..1441f997 --- /dev/null +++ b/examples/local/boot_volumes_1.yaml @@ -0,0 +1,278 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + project_id: + type: string + + image: + type: string + + flavor: + type: string + + name_prefix: + type: string + default: 'bootable_blueprint_' + + external_network_id: + type: string + + security_group_rules: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + + volume1_use_existing: + default: false + + volume1_name: + default: volume1_name + + volume1_size: + default: 10 + + example_public_subnet_cidr: + type: string + default: '10.10.0.0/24' + + example_private_subnet_cidr: + type: string + default: '10.10.1.0/24' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-volume-booted-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + flavor_id: { get_input: flavor } + availability_zone: nova + # This is only run on local mode. For use with a manager, it can be commented out. 
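+ # (The server declares no image_id; it is expected to boot from example-volume-1,
+ # which is created from the image input and exposed as device 'vda' through the
+ # depends_on relationship below.)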
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-public-port + - type: cloudify.relationships.depends_on + target: example-volume-1 + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-private-port + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-volume-1: + type: cloudify.nodes.openstack.Volume + properties: + client_config: *client_config + use_external_resource: { get_input: volume1_use_existing } + device_name: 'vda' + resource_config: + name: { concat: [ { get_input: name_prefix }, 'volume_1' ] } + availability_zone: 'nova' + description: 'Example Volume Size 1' + project_id: { get_input: project_id } + size: { get_input: volume1_size } + imageRef: { get_input: image } + + example-public-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-public-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.depends_on + target: example-public-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-floating-ip-address + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-private-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.depends_on + target: example-private-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: { get_input: security_group_rules } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group for openstack boot volume' + + example-floating-ip-address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_subnet' ] } + cidr: { get_input: example_private_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_network' ] } + + example-public-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_subnet' ] } + cidr: { get_input: example_public_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-public-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_network' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true diff --git a/examples/local/boot_volumes_2.yaml b/examples/local/boot_volumes_2.yaml new file mode 100644 index 00000000..7e68f7b8 --- /dev/null +++ b/examples/local/boot_volumes_2.yaml @@ -0,0 +1,312 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + project_id: + type: string + + image_centos: + type: string + + image_ubuntu: + type: string + + flavor: + type: string + + name_prefix: + type: string + default: 'bootable_blueprint_' + + external_network_id: + type: string + + security_group_rules: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + + volume1_use_existing: + default: false + + volume1_name: + default: volume1_name + + volume1_device_name: + default: vda + + volume1_size: + default: 10 + + volume2_use_existing: + default: false + + volume2_name: + default: volume2_name + + volume2_device_name: + default: vdb + + volume2_size: + default: 10 + + example_public_subnet_cidr: + type: string + default: '10.10.0.0/24' + + example_private_subnet_cidr: + type: string + default: '10.10.1.0/24' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-volume-booted-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + flavor_id: { get_input: flavor } + availability_zone: nova + # This is only run on local mode. For use with a manager, it can be commented out. 
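+ # (As in boot_volumes_1.yaml, the server has no image_id; two image-backed volumes
+ # are attached as 'vda' and 'vdb' through the depends_on relationships below, with
+ # the 'vda' volume presumably acting as the boot device.)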
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-public-port + - type: cloudify.relationships.depends_on + target: example-volume-1 + - type: cloudify.relationships.depends_on + target: example-volume-2 + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-private-port + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-volume-1: + type: cloudify.nodes.openstack.Volume + properties: + client_config: *client_config + use_external_resource: { get_input: volume1_use_existing } + device_name: { get_input: volume1_device_name } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'volume_1' ] } + availability_zone: 'nova' + description: 'Example Volume Size 1' + project_id: { get_input: project_id } + size: { get_input: volume1_size } + imageRef: { get_input: image_centos } + + example-volume-2: + type: cloudify.nodes.openstack.Volume + properties: + client_config: *client_config + use_external_resource: { get_input: volume1_use_existing } + device_name: { get_input: volume2_device_name } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'volume_2' ] } + description: 'Example Volume Size 2' + project_id: { get_input: project_id } + size: { get_input: volume2_size } + imageRef: { get_input: image_ubuntu } + availability_zone: 'nova' + + example-public-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-public-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.depends_on + target: example-public-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-floating-ip-address + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-private-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.depends_on + target: example-private-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: { get_input: security_group_rules } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group for openstack boot volume' + + example-floating-ip-address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_subnet' ] } + cidr: { get_input: example_private_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_network' ] } + + example-public-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_subnet' ] } + cidr: { get_input: example_public_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-public-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_network' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true diff --git a/examples/local/boot_volumes_3.yaml b/examples/local/boot_volumes_3.yaml new file mode 100644 index 00000000..d5d6b59f --- /dev/null +++ b/examples/local/boot_volumes_3.yaml @@ -0,0 +1,281 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + project_id: + type: string + + flavor: + type: string + + name_prefix: + type: string + default: 'bootable_blueprint_' + + external_network_id: + type: string + + security_group_rules: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + + volume1_use_existing: + default: true + + volume1_external_id: + type: string + + volume1_device_name: + default: vda + + volume2_use_existing: + default: true + + volume2_external_id: + type: string + + volume2_device_name: + default: vdb + + example_public_subnet_cidr: + type: string + default: '10.10.0.0/24' + + example_private_subnet_cidr: + type: string + default: '10.10.1.0/24' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-volume-booted-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + flavor_id: { get_input: flavor } + availability_zone: nova + kwargs: + block_device_mapping_v2: + - device_name: { get_input: volume1_device_name } + boot_index: 0 + uuid: { get_input: volume1_external_id } + source_type: volume + delete_on_termination: false + - device_name: { get_input: volume2_device_name } + boot_index: 1 + uuid: { get_input: volume2_external_id } + source_type: volume + delete_on_termination: false + # This is only run on local mode. For use with a manager, it can be commented out. 
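+ # (block_device_mapping_v2 is handed via kwargs to the Nova server-create call:
+ # boot_index 0 marks the boot volume, uuid points at the pre-existing volumes given
+ # as inputs, and delete_on_termination: false keeps them when the server is deleted.)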
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-public-port + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-private-port + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-public-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-public-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.depends_on + target: example-public-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-floating-ip-address + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-private-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.depends_on + target: example-private-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: { get_input: security_group_rules } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group for openstack boot volume' + + example-floating-ip-address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_subnet' ] } + cidr: { get_input: example_private_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_network' ] } + + example-public-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_subnet' ] } + network_id: { get_attribute: [ example-public-network, id ] } + cidr: { get_input: example_public_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-public-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_network' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true diff --git a/examples/local/disabled_egress_rule_security_group.yaml b/examples/local/disabled_egress_rule_security_group.yaml new file mode 100644 index 00000000..12404042 --- /dev/null +++ b/examples/local/disabled_egress_rule_security_group.yaml @@ -0,0 +1,65 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + security_group_config: + default: + name: 'cloudify_security_group' + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
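+ # (security_group_rules_config below lists explicit ingress and egress rules for
+ # port 80; combined with disable_default_egress_rules: true on the node, the group
+ # should end up with only the rules defined here.)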
+ + security_group_rules_config: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + disable_default_egress_rules: true + security_group_rules: { get_input: security_group_rules_config } + resource_config: { get_input: security_group_config} + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/external_server.yaml b/examples/local/external_server.yaml new file mode 100644 index 00000000..28c763bc --- /dev/null +++ b/examples/local/external_server.yaml @@ -0,0 +1,255 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: 'external-server-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. 
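+ # (external_network_id above and the two external_* inputs below identify resources
+ # that already exist in OpenStack; the matching nodes set use_external_resource: true,
+ # so the plugin reuses them rather than creating or deleting them.)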
+ + external_key_pair_id: + type: string + description: An external keypair already created + + external_server_id: + type: string + description: An external server already provisioned + + example_subnet_cidr_1: + type: string + description: Subnet 1 cidr + default: '172.16.0.0/16' + + example_subnet_cidr_2: + type: string + description: Subnet 2 cidr + default: '192.168.0.0/16' + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-external-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + use_external_resource: true + agent_config: + install_method: none + resource_config: + id: { get_input: external_server_id } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-port + - type: cloudify.relationships.connected_to + target: example-network-2 + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-external-keypair + + example-external-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + use_external_resource: true + client_config: *client_config + resource_config: + id: { get_input: external_key_pair_id } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
+ + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet-1, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.connected_to + target: example-network-1 + - type: cloudify.relationships.connected_to + target: example-subnet-1 + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-ip + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + cloudify.interfaces.operations: + list: + inputs: + query: + all_tenants: false + + example-network-1: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-1' ] } + + example-subnet-1: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-1' ] } + cidr: { get_input: example_subnet_cidr_1 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-1 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network-2: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-2' ] } + + example-subnet-2: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-2' ] } + cidr: { get_input: example_subnet_cidr_2 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-2 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/flavor.yaml b/examples/local/flavor.yaml new file mode 100644 index 00000000..19abc0dd --- /dev/null +++ b/examples/local/flavor.yaml @@ -0,0 +1,55 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + flavor_config: + default: + name: { concat: [ { get_input: name_prefix }, 'flavor' ] } + ram: 4024 + disk: 8 + vcpus: 2 + + name_prefix: + type: string + default: compute_ + + user_domain_id: + type: string + default: default + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-flavor: + type: cloudify.nodes.openstack.Flavor + properties: + client_config: *client_config + resource_config: { get_input: flavor_config } diff --git a/examples/local/floating_ip_1.yaml b/examples/local/floating_ip_1.yaml new file mode 100644 index 00000000..17173db5 --- /dev/null +++ b/examples/local/floating_ip_1.yaml @@ -0,0 +1,167 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: 'floating-ip-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. + + example_subnet_cidr: + type: string + default: 10.0.0.0/16 + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-server-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server-port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + - type: cloudify.relationships.connected_to + target: example-server-port + # This is only run on local mode. For use with a manager, it can be commented out. 
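+ # (Here the floating network appears to be resolved from the connected
+ # example-external-network node; floating_ip_2.yaml and floating_ip_3.yaml create the
+ # same IP by name, via floating_network_name, and by id, via floating_network_id.)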
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/floating_ip_2.yaml b/examples/local/floating_ip_2.yaml new file mode 100644 index 00000000..05fbfac1 --- /dev/null +++ b/examples/local/floating_ip_2.yaml @@ -0,0 +1,60 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: 'floating-ip-blueprint_' + description: Prefix string for all resources + + external_network_name: + type: string + description: A network that provisions floating IPs. + + example_subnet_cidr: + type: string + default: 10.0.0.0/16 + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + resource_config: + floating_network_name: { get_input: external_network_name } + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/floating_ip_3.yaml b/examples/local/floating_ip_3.yaml new file mode 100644 index 00000000..791b9994 --- /dev/null +++ b/examples/local/floating_ip_3.yaml @@ -0,0 +1,167 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: 'floating-ip-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. + + example_subnet_cidr: + type: string + default: 10.0.0.0/16 + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-server-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server-port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + resource_config: + floating_network_id: { get_input: external_network_id } + relationships: + - type: cloudify.relationships.connected_to + target: example-server-port + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/host_aggregate.yaml b/examples/local/host_aggregate.yaml new file mode 100644 index 00000000..3abb7a4f --- /dev/null +++ b/examples/local/host_aggregate.yaml @@ -0,0 +1,51 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: compute_ + + host_aggregate_config: + default: + name: { concat: [ { get_input: name_prefix }, 'host_aggregate' ] } + availability_zone: nova + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-host-aggregate: + type: cloudify.nodes.openstack.HostAggregate + properties: + client_config: *client_config + metadata: + ssd: 'True' + resource_config: { get_input: host_aggregate_config } diff --git a/examples/local/image.yaml b/examples/local/image.yaml new file mode 100644 index 00000000..cf476f8f --- /dev/null +++ b/examples/local/image.yaml @@ -0,0 +1,49 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + image_config: + default: + name: 'openstack-image-custom' + container_format: "bare" + disk_format: "qcow2" + # Adding image url is not supported yet +# image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1503.qcow2c + + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-image: + type: cloudify.nodes.openstack.Image + properties: + client_config: *client_config + resource_config: { get_input: image_config } diff --git a/examples/local/keypair.yaml b/examples/local/keypair.yaml new file mode 100644 index 00000000..19a48300 --- /dev/null +++ b/examples/local/keypair.yaml @@ -0,0 +1,45 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: compute_ + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { 
concat: [ { get_input: name_prefix }, 'keypair' ] } diff --git a/examples/local/network.yaml b/examples/local/network.yaml new file mode 100644 index 00000000..bad5b65d --- /dev/null +++ b/examples/local/network.yaml @@ -0,0 +1,132 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + example_subnet_cidr: + type: string + default: '10.0.0.0/24' + + example_fixed_ip: + type: string + default: '10.0.0.1' + + name_prefix: + type: string + default: network-blueprint_ + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + ip_address: { get_input: example_fixed_ip } + device_id: { get_attribute: [ example-router, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-router + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/port_fixed_ip.yaml b/examples/local/port_fixed_ip.yaml new file mode 100644 index 00000000..54977f71 --- /dev/null +++ b/examples/local/port_fixed_ip.yaml @@ -0,0 +1,112 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + network_id: + type: string + + subnet_id: + type: string + + fixed_ip: + type: string + default: '10.0.1.2' + + second_fixed_ip: + type: string + default: '10.0.1.3' + + name_prefix: + type: string + default: external-port-blueprint- + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_attribute: [ example-port, id ] } + allowed_address_pairs: + - ip_address: { get_input: second_fixed_ip } + relationships: + - type: cloudify.relationships.depends_on + target: example-port + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + fixed_ips: + - ip_address: { get_input: fixed_ip } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.depends_on + target: example-private-subnet + - type: cloudify.relationships.depends_on + target: example-security-group + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: My Test Security Group + + example-private-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: subnet_id } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + + example-private-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: network_id } diff --git a/examples/local/project.yaml b/examples/local/project.yaml new file mode 100644 index 00000000..e9c271f5 --- /dev/null +++ b/examples/local/project.yaml @@ -0,0 +1,53 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + project_config: + default: + name: 'test_project' + description: 'Testing Project' + is_domain: True + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-project: + type: cloudify.nodes.openstack.Project + properties: + client_config: *client_config + resource_config: { get_input: project_config } + # Users & roles must already exist, otherwise the install workflow will fail + users: + - name: test_user + roles: + - test_role_1 + - test_role_2 + - test_role_3 diff --git a/examples/local/rbac_policy.yaml b/examples/local/rbac_policy.yaml new file mode 100644 index 00000000..7a5743aa --- /dev/null +++ b/examples/local/rbac_policy.yaml @@ -0,0 +1,187 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + external_network_id: + type: string + + project_id: + type: string + + example_subnet_cidr: + type: string + default: '10.0.0.0/24' + + example_fixed_ip: + type: string + default: 10.0.0.1 + + name_prefix: + type: string + default: 'rbac_policy' + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: {
get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-rbac-policy: + type: cloudify.nodes.openstack.RBACPolicy + properties: + client_config: *client_config + resource_config: + target_tenant: { get_input: project_id } + action: access_as_shared + relationships: + - type: cloudify.relationships.openstack.rbac_policy_applied_to + target_interfaces: + cloudify.interfaces.relationship_lifecycle: + unlink: + inputs: + disable_dhcp: + default: true + target: example-network + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + ip_address: { get_input: example_fixed_ip } + device_id: { get_attribute: [ example-router, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-router + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + +outputs: + network: + value: { get_attribute: [ example-network, id ] } + subnet: + value: { get_attribute: [ example-subnet, id ] } + network_rbac_policy: + value: { get_attribute: [ example-rbac-policy, id ] } + port: + value: { get_attribute: [ example-port, id ] } + security-group: + value: { get_attribute: [ example-security-group, id ] } diff --git a/examples/local/routes.yaml b/examples/local/routes.yaml new file mode 100644 index 00000000..cf5fa447 --- /dev/null +++ b/examples/local/routes.yaml @@ -0,0 +1,165 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + image: + type: string + description: The UUID of an Ubuntu Trusty 14.04 image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + external_network_id: + type: string + description: A network that provisions floating IPs. + + example_subnet_1_cidr: + type: string + default: 10.10.4.0/24 + + example_subnet_2_cidr: + type: string + default: 192.168.123.0/24 + + name_prefix: + type: string + default: routes_example_ + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-routes: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_attribute: [ example-router, id ] } + relationships: + - type: cloudify.relationships.depends_on + target: example-subnet-1 + - type: cloudify.relationships.depends_on + target: example-subnet-2 + interfaces: + # This is only run on local mode. For use with a manager, it can be commented out. 
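+ # (Pattern worth noting: example-routes is a second Router node with use_external_resource: true and its id taken from example-router at runtime, so the static routes passed to the start operation below are applied to the router that example-router creates.)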
+ cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + start: + inputs: + routes: + - destination: 10.10.4.0/24 + nexthop: 192.168.123.123 + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network-1: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network_1' ] } + + example-network-2: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network_2' ] } + + example-subnet-1: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet_1' ] } + cidr: { get_input: example_subnet_1_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-1 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-subnet-2: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet_2' ] } + cidr: { get_input: example_subnet_2_cidr } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-2 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/security_group.yaml b/examples/local/security_group.yaml new file mode 100644 index 00000000..fd6a9fde --- /dev/null +++ b/examples/local/security_group.yaml @@ -0,0 +1,67 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + security_group_config: + default: + name: 'cloudify_security_group' + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
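+ # Each rule in the default list below sets security_group_id via { get_attribute: [ SELF, id ] }; since the input is consumed by the example-security-group node, SELF presumably resolves to that node, binding the rules to the group it creates at runtime.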
+ + security_group_rules_config: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + security_group_id: { get_attribute: [ SELF, id ] } + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + security_group_id: { get_attribute: [ SELF, id ] } + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: { get_input: security_group_rules_config } + resource_config: { get_input: security_group_config} + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/server-floating-ip-connection.yaml b/examples/local/server-floating-ip-connection.yaml new file mode 100644 index 00000000..bdf5df90 --- /dev/null +++ b/examples/local/server-floating-ip-connection.yaml @@ -0,0 +1,227 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + image: + type: string + description: The UUID of an image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + name_prefix: + type: string + default: 'server-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. + + example_subnet_cidr_1: + type: string + description: Subnet 1 cidr + default: '172.16.0.0/16' + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_floating_ip + target: example-ip + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-port + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. 
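+ # (The floating IP, port, and keypair attachments above are handled through the cloudify.relationships.openstack.* relationship types; the create override below is the same local-mode input passing used in the other examples.)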
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet-1, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.connected_to + target: example-network-1 + - type: cloudify.relationships.connected_to + target: example-subnet-1 + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + cloudify.interfaces.operations: + list: + inputs: + query: + all_tenants: false + + example-network-1: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-1' ] } + + example-subnet-1: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-1' ] } + cidr: { get_input: example_subnet_cidr_1 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-1 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/server-security-group-connection.yaml b/examples/local/server-security-group-connection.yaml new file mode 100644 index 00000000..1a6af7d6 --- /dev/null +++ b/examples/local/server-security-group-connection.yaml @@ -0,0 +1,261 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + image: + type: string + description: The UUID of an image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + name_prefix: + type: string + default: 'server-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. + + example_subnet_cidr_1: + type: string + description: Subnet 1 cidr + default: '172.16.0.0/16' + + example_subnet_cidr_2: + type: string + description: Subnet 2 cidr + default: '10.0.0.0/16' + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_security_group + target: example-security-group + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-port + - type: cloudify.relationships.connected_to + target: example-network-2 + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet-1, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-network-1 + - type: cloudify.relationships.connected_to + target: example-subnet-1 + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-ip + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + cloudify.interfaces.operations: + list: + inputs: + query: + all_tenants: false + + example-network-1: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-1' ] } + + example-subnet-1: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-1' ] } + cidr: { get_input: example_subnet_cidr_1 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-1 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network-2: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-2' ] } + + example-subnet-2: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-2' ] } + cidr: { get_input: example_subnet_cidr_2 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-2 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/server.yaml b/examples/local/server.yaml new file mode 100644 index 00000000..2dab95c1 --- /dev/null +++ b/examples/local/server.yaml @@ -0,0 +1,261 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + image: + type: string + description: The UUID of an image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + name_prefix: + type: string + default: 'server-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. 
+ + example_subnet_cidr_1: + type: string + description: Subnet 1 cidr + default: '172.16.0.0/16' + + example_subnet_cidr_2: + type: string + description: Subnet 2 cidr + default: '10.0.0.0/16' + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-port + - type: cloudify.relationships.connected_to + target: example-network-2 + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
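+ # The port below is the server's attachment point: it takes its fixed IP from example-subnet-1, is connected to the security group above, and gets the floating IP associated with it via the port_connected_to_floating_ip relationship.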
+ + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet-1, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.connected_to + target: example-network-1 + - type: cloudify.relationships.connected_to + target: example-subnet-1 + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-ip + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + cloudify.interfaces.operations: + list: + inputs: + query: + all_tenants: false + + example-network-1: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-1' ] } + + example-subnet-1: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-1' ] } + cidr: { get_input: example_subnet_cidr_1 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-1 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network-2: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network-2' ] } + + example-subnet-2: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet-2' ] } + cidr: { get_input: example_subnet_cidr_2 } + enable_dhcp: true + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-network-2 + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/local/server_group.yaml b/examples/local/server_group.yaml new file mode 100644 index 00000000..ad78275f --- /dev/null +++ b/examples/local/server_group.yaml @@ -0,0 +1,50 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + server_group_config: + default: + name: { concat: [ { get_input: name_prefix }, 'server_group' ] } + policies: + - anti-affinity + + name_prefix: + type: string + default: compute_ + + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-server-group: + type: cloudify.nodes.openstack.ServerGroup + properties: + client_config: *client_config + resource_config: { get_input: server_group_config } diff --git a/examples/local/user.yaml b/examples/local/user.yaml new file mode 100644 index 00000000..8c1387c6 --- /dev/null +++ b/examples/local/user.yaml @@ -0,0 +1,49 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + user_config: + default: + name: 'test-user' + description: 'Test User' + default_project_id: { get_input: project_name } + enabled: True + password: 'test1234567890' + email: 'test@test.com' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-user: + type: cloudify.nodes.openstack.User + properties: + client_config: *client_config + resource_config: { get_input: user_config } diff --git a/examples/local/volume.yaml b/examples/local/volume.yaml new file mode 100644 index 00000000..8a96053b --- /dev/null +++ b/examples/local/volume.yaml @@ -0,0 +1,300 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + project_id: + type: string + + image: + type: string + + flavor: + type: string + + name_prefix: + type: string + default: 'bootable_blueprint_' + + external_network_id: + type: string + + security_group_rules: + default: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 22 + port_range_min: 22 + direction: egress + protocol: tcp + + volume1_use_existing: + default: false + + volume1_name: + default: volume1_name + + volume1_size: + 
default: 1 + + volume2_use_existing: + default: false + + volume2_name: + default: volume2_name + + volume2_size: + default: 1 + + example_public_subnet_cidr: + type: string + default: '10.10.0.0/24' + + example_private_subnet_cidr: + type: string + default: '10.10.1.0/24' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-volume-booted-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + availability_zone: nova + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-public-port + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-private-port + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-volume-1: + type: cloudify.nodes.openstack.Volume + properties: + client_config: *client_config + use_external_resource: { get_input: volume1_use_existing } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'volume_1' ] } + description: 'Example Volume Size 1' + project_id: { get_input: project_id } + size: { get_input: volume1_size } + relationships: + - type: cloudify.relationships.openstack.volume_attached_to_server + target: example-volume-booted-server + + example-volume-2: + type: cloudify.nodes.openstack.Volume + properties: + client_config: *client_config + use_external_resource: { get_input: volume1_use_existing } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'volume_2' ] } + description: 'Example Volume Size 2' + project_id: { get_input: project_id } + size: { get_input: volume2_size } + relationships: + - type: cloudify.relationships.openstack.volume_attached_to_server + target: example-volume-booted-server + + example-public-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-public-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.depends_on + target: example-public-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-floating-ip-address + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-private-subnet, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.depends_on + target: example-private-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: { get_input: security_group_rules } + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group for openstack boot volume' + + example-floating-ip-address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_subnet' ] } + cidr: { get_input: example_private_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-private-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-private-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'private_network' ] } + + example-public-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_subnet' ] } + cidr: { get_input: example_public_subnet_cidr } + ip_version: 4 + relationships: + - type: cloudify.relationships.contained_in + target: example-public-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-public-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'public_network' ] } + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true diff --git a/examples/local/volume_type.yaml b/examples/local/volume_type.yaml new file mode 100644 index 00000000..fc53a322 --- /dev/null +++ b/examples/local/volume_type.yaml @@ -0,0 +1,50 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: block_storage_ + + volume_type_config: + default: + name: { concat: [ { get_input: name_prefix }, 'volume_type' ] } + extra_specs: + capabilities: 'gpu' + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-volume-type: + type: cloudify.nodes.openstack.VolumeType + properties: + client_config: *client_config + resource_config: { get_input: volume_type_config } diff --git a/examples/local/windows-blueprint.yaml b/examples/local/windows-blueprint.yaml new file mode 100644 index 00000000..f781e194 --- /dev/null +++ b/examples/local/windows-blueprint.yaml @@ -0,0 +1,204 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml + # For use with a manager use: plugin:cloudify-openstack-plugin + - plugin.yaml + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + name_prefix: + type: string + default: 'windows-server-blueprint_' + description: Prefix string for all resources + + external_network_id: + type: string + description: A network that provisions floating IPs. + + image: + type: string + description: The UUID of an Windows image in your Openstack account. + + flavor: + type: string + description: The UUID of small flavor in your Openstack account. + + network_id: + type: string + description: The UUID for your Cloudify Manager Openstack Network. + + subnet_id: + type: string + description: The UUID for your Cloudify Manager Openstack Subnet. 
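+ # Note for the templates below: the Windows server sets use_password: true and the security group opens TCP 5985, the default WinRM HTTP port typically used to manage a Windows host remotely.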
+ + +dsl_definitions: + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + region_name: { get_input: region_name } + project_name: { get_input: project_name } + +node_templates: + + example-external-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-server-node: + type: cloudify.nodes.openstack.WindowsServer + properties: + use_password: true + client_config: *client_config + agent_config: + install_method: init_script + user: Admin + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image } + flavor_id: { get_input: flavor } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-port + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + use_external_resource: true + client_config: *client_config + resource_config: + id: { get_input: network_id } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + use_external_resource: true + client_config: *client_config + resource_config: + id: { get_input: subnet_id } + relationships: + - type: cloudify.relationships.contained_in + target: example-network + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + security_group_rules: + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 80 + port_range_min: 80 + direction: egress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: ingress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_min: 53333 + port_range_max: 53333 + protocol: tcp + direction: egress + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 5985 + port_range_min: 5985 + direction: ingress + protocol: tcp + + - remote_ip_prefix: 0.0.0.0/0 + port_range_max: 5985 + port_range_min: 5985 + direction: egress + protocol: tcp + + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security-group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' 
+ + example-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'port' ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-network + - type: cloudify.relationships.connected_to + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + - type: cloudify.relationships.openstack.port_connected_to_floating_ip + target: example-ip + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-ip: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external-network + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/examples/manager/blueprint.yaml b/examples/manager/blueprint.yaml new file mode 100644 index 00000000..2c4942c5 --- /dev/null +++ b/examples/manager/blueprint.yaml @@ -0,0 +1,207 @@ +tosca_definitions_version: cloudify_dsl_1_3 + +imports: + - http://www.getcloudify.org/spec/cloudify/4.5.5/types.yaml + - plugin:cloudify-openstack-plugin + +inputs: + + auth_url: + type: string + + username: + type: string + + password: + type: string + + region_name: + type: string + + project_name: + type: string + + external_network_id: + type: string + + example_subnet_cidr: + type: string + default: 10.0.0.0/16 + + image_id: + type: string + + flavor_id: + type: string + + name_prefix: + type: string + + agent_user: + type: string + +dsl_definitions: + + client_config: &client_config + auth_url: { get_input: auth_url } + username: { get_input: username } + password: { get_input: password } + project_name: { get_input: project_name } + region_name: { get_input: region_name } + +node_templates: + + example-external_network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + use_external_resource: true + resource_config: + id: { get_input: external_network_id } + kwargs: + routing: + external: true + + example-router: + type: cloudify.nodes.openstack.Router + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'router' ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-external_network + # This is only run on local mode. For use with a manager, it can be commented out. 
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-network: + type: cloudify.nodes.openstack.Network + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'network' ] } + + example-subnet: + type: cloudify.nodes.openstack.Subnet + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'subnet' ] } + cidr: { get_input: example_subnet_cidr } + enable_dhcp: true + ip_version: 4 + dns_nameservers: [8.8.4.4, 8.8.8.8] + relationships: + - type: cloudify.relationships.contained_in + target: example-network + - type: cloudify.relationships.openstack.subnet_connected_to_router + target: example-router + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-security-group: + type: cloudify.nodes.openstack.SecurityGroup + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'security_group' ] } + description: 'A security group created by Cloudify OpenStack SDK plugin.' + + example-security-group-rule: + type: cloudify.nodes.openstack.SecurityGroupRule + properties: + client_config: *client_config + resource_config: + direction: ingress + protocol: tcp + port_range_max: 22 + port_range_min: 22 + security_group_id: { get_attribute: [ example-security-group, id ] } + relationships: + - type: cloudify.relationships.contained_in + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-server-port: + type: cloudify.nodes.openstack.Port + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server-port' ] } + network_id: { get_attribute: [ example-network, id ] } + fixed_ips: + - subnet_id: { get_attribute: [ example-subnet, id ] } + relationships: + - type: cloudify.relationships.connected_to + target: example-subnet + - type: cloudify.relationships.connected_to + target: example-security-group + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + cloudify.interfaces.operations: + list: + inputs: + query: + all_tenants: false + + example-floating_ip_address: + type: cloudify.nodes.openstack.FloatingIP + properties: + client_config: *client_config + relationships: + - type: cloudify.relationships.connected_to + target: example-external_network + - type: cloudify.relationships.connected_to + target: example-server-port + # This is only run on local mode. For use with a manager, it can be commented out. 
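+ # (This blueprint imports the plugin via plugin:cloudify-openstack-plugin, so per the comments above these local-mode create overrides are not needed on a manager; they appear to be kept only for parity with the local examples.)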
+ interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } + + example-keypair: + type: cloudify.nodes.openstack.KeyPair + properties: + client_config: *client_config + resource_config: + name: { concat: [ { get_input: name_prefix }, 'keypair' ] } + + example-server: + type: cloudify.nodes.openstack.Server + properties: + client_config: *client_config + agent_config: + install_method: none + resource_config: + name: { concat: [ { get_input: name_prefix }, 'server' ] } + image_id: { get_input: image_id } + flavor_id: { get_input: flavor_id } + relationships: + - type: cloudify.relationships.openstack.server_connected_to_port + target: example-server-port + - type: cloudify.relationships.openstack.server_connected_to_keypair + target: example-keypair + # This is only run on local mode. For use with a manager, it can be commented out. + interfaces: + cloudify.interfaces.lifecycle: + create: + inputs: + resource_config: { get_property: [ SELF, resource_config ] } diff --git a/glance_plugin/__init__.py b/glance_plugin/__init__.py deleted file mode 100644 index 809f033a..00000000 --- a/glance_plugin/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. diff --git a/glance_plugin/image.py b/glance_plugin/image.py deleted file mode 100644 index 303f8782..00000000 --- a/glance_plugin/image.py +++ /dev/null @@ -1,186 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
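The glance_plugin/image.py module removed below drives image creation through a dedicated Glance client and Python 2-only modules (httplib, urlparse). As a point of comparison only, the same create-and-upload flow against a single openstacksdk connection might look like the sketch below; the create_image call and its keyword arguments are assumptions about the openstacksdk cloud layer, and the cloud name, image name, and file path are placeholders.

```python
# Hedged sketch: image creation and upload through one openstacksdk
# connection, as a point of comparison with the Glance-client flow removed
# below. All names and paths are placeholders; create_image and its keyword
# arguments are assumed from the openstacksdk cloud layer.
import openstack

conn = openstack.connect(cloud='example-cloud')  # placeholder clouds.yaml entry

image = conn.create_image(
    'example-image',                # placeholder image name
    filename='/tmp/example.qcow2',  # placeholder local image file
    container_format='bare',
    disk_format='qcow2',
    wait=True,                      # block until the image becomes active
)
print(image.id, image.status)
```

The wait=True flag roughly covers the polling that the removed start operation below implements with operation retries.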
-import httplib -from urlparse import urlparse - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError - -from openstack_plugin_common import ( - with_glance_client, - get_openstack_id, - use_external_resource, - create_object_dict, - get_openstack_ids_of_connected_nodes_by_openstack_type, - delete_resource_and_runtime_properties, - validate_resource, - add_list_to_runtime_properties, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS) - - -IMAGE_OPENSTACK_TYPE = 'image' -IMAGE_STATUS_ACTIVE = 'active' - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS -REQUIRED_PROPERTIES = ['container_format', 'disk_format'] - - -@operation -@with_glance_client -def create(glance_client, args, **kwargs): - if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE): - return - - _validate_image_dictionary() - img_dict = create_object_dict(ctx, IMAGE_OPENSTACK_TYPE, args, {}) - img_path = img_dict.pop('data', '') - img = glance_client.images.create(**img_dict) - img_url = ctx.node.properties.get('image_url') - try: - _validate_image() - if img_path: - with open(img_path, 'rb') as image_file: - glance_client.images.upload( - image_id=img.id, - image_data=image_file) - elif img_url: - img = glance_client.images.add_location(img.id, img_url, {}) - except Exception: - _remove_protected(glance_client) - glance_client.images.delete(image_id=img.id) - raise - - set_openstack_runtime_properties(ctx, img, IMAGE_OPENSTACK_TYPE) - - -def _get_image_by_ctx(glance_client, ctx): - return glance_client.images.get( - image_id=get_openstack_id(ctx)) - - -@operation -@with_glance_client -def start(glance_client, start_retry_interval, **kwargs): - img = _get_image_by_ctx(glance_client, ctx) - if img.status != IMAGE_STATUS_ACTIVE: - return ctx.operation.retry( - message='Waiting for image to get uploaded', - retry_after=start_retry_interval) - - -@operation -@with_glance_client -def delete(glance_client, **kwargs): - _remove_protected(glance_client) - delete_resource_and_runtime_properties(ctx, glance_client, - RUNTIME_PROPERTIES_KEYS) - - -@with_glance_client -def list_images(glance_client, args, **kwargs): - image_list_generator = glance_client.images.list(**args) - add_list_to_runtime_properties( - ctx, - IMAGE_OPENSTACK_TYPE, - [dict(item) for item in image_list_generator]) - - -@with_glance_client -def update(glance_client, args, **kwargs): - image_dict = create_object_dict(ctx, IMAGE_OPENSTACK_TYPE, args) - image_dict[IMAGE_OPENSTACK_TYPE] = get_openstack_id(ctx) - image = glance_client.images.update(**image_dict) - set_openstack_runtime_properties(ctx, image, IMAGE_OPENSTACK_TYPE) - - -@operation -@with_glance_client -def creation_validation(glance_client, **kwargs): - validate_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE) - _validate_image_dictionary() - _validate_image() - - -def _validate_image_dictionary(): - img = ctx.node.properties[IMAGE_OPENSTACK_TYPE] - missing = '' - try: - for prop in REQUIRED_PROPERTIES: - if prop not in img: - missing += '{0} '.format(prop) - except TypeError: - missing = ' '.join(REQUIRED_PROPERTIES) - if missing: - raise NonRecoverableError('Required properties are missing: {' - '0}. 
Please update your image ' - 'dictionary.'.format(missing)) - - -def _validate_image(): - img = ctx.node.properties[IMAGE_OPENSTACK_TYPE] - img_path = img.get('data') - img_url = ctx.node.properties.get('image_url') - if not img_url and not img_path: - raise NonRecoverableError('Neither image url nor image path was ' - 'provided') - if img_url and img_path: - raise NonRecoverableError('Multiple image sources provided') - if img_url: - _check_url(img_url) - if img_path: - _check_path() - - -def _check_url(url): - p = urlparse(url) - conn = httplib.HTTPConnection(p.netloc) - conn.request('HEAD', p.path) - resp = conn.getresponse() - if resp.status >= 400: - raise NonRecoverableError('Invalid image URL') - - -def _check_path(): - img = ctx.node.properties[IMAGE_OPENSTACK_TYPE] - img_path = img.get('data') - try: - with open(img_path, 'rb'): - pass - except TypeError: - if not img.get('url'): - raise NonRecoverableError('No path or url provided') - except IOError: - raise NonRecoverableError( - 'Unable to open image file with path: "{}"'.format(img_path)) - - -def _remove_protected(glance_client): - if use_external_resource(ctx, glance_client, IMAGE_OPENSTACK_TYPE): - return - - is_protected = ctx.node.properties[IMAGE_OPENSTACK_TYPE].get('protected', - False) - if is_protected: - img_id = get_openstack_id(ctx) - glance_client.images.update(img_id, protected=False) - - -def handle_image_from_relationship(obj_dict, property_name_to_put, ctx): - images = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, IMAGE_OPENSTACK_TYPE) - if images: - obj_dict.update({property_name_to_put: images[0]}) diff --git a/glance_plugin/tests/resources/test-image-start.yaml b/glance_plugin/tests/resources/test-image-start.yaml deleted file mode 100644 index 12c9aa79..00000000 --- a/glance_plugin/tests/resources/test-image-start.yaml +++ /dev/null @@ -1,30 +0,0 @@ - -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml - - plugin.yaml - -inputs: - use_password: - type: boolean - default: false - -node_templates: - image: - type: cloudify.openstack.nodes.Image - properties: - image: - disk_format: test_format - container_format: test_format - data: test_path - openstack_config: - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa - interfaces: - cloudify.interfaces.lifecycle: - start: - inputs: - start_retry_interval: 1 diff --git a/glance_plugin/tests/test.py b/glance_plugin/tests/test.py deleted file mode 100644 index 4a88cba4..00000000 --- a/glance_plugin/tests/test.py +++ /dev/null @@ -1,148 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
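The _check_url helper removed above still relies on the Python 2 httplib and urlparse modules. A Python 3 rendering of the same reachability check is sketched here for reference; only the module names change, and NonRecoverableError mirrors the original import.

```python
# Python 3 sketch of the HEAD-request check performed by the removed
# _check_url helper above: any 4xx/5xx response marks the image URL invalid.
from http.client import HTTPConnection
from urllib.parse import urlparse

from cloudify.exceptions import NonRecoverableError


def check_image_url(url):
    parsed = urlparse(url)
    conn = HTTPConnection(parsed.netloc)
    conn.request('HEAD', parsed.path or '/')
    response = conn.getresponse()
    if response.status >= 400:
        raise NonRecoverableError('Invalid image URL: {0}'.format(url))
```

Like the original helper, this only handles plain HTTP; an https URL would need http.client.HTTPSConnection.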
- -import mock -import os -import tempfile -import unittest - -import glance_plugin -from glance_plugin import image - -from cloudify.mocks import MockCloudifyContext -from cloudify.test_utils import workflow_test -from cloudify.exceptions import NonRecoverableError - - -def ctx_mock(image_dict): - return MockCloudifyContext( - node_id='d', - properties=image_dict) - - -class TestCheckImage(unittest.TestCase): - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {}})) - def test_check_image_no_file_no_url(self): - # Test if it throws exception no file & no url - self.assertRaises(NonRecoverableError, - image._validate_image) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image_url': 'test-url', 'image': {'data': '.'}})) - def test_check_image_and_url(self): - # Test if it throws exception file & url - self.assertRaises(NonRecoverableError, - image._validate_image) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image_url': 'test-url', 'image': {}})) - def test_check_image_url(self): - # test if it passes no file & url - http_connection_mock = mock.MagicMock() - http_connection_mock.return_value.getresponse.return_value.status = 200 - with mock.patch('httplib.HTTPConnection', http_connection_mock): - glance_plugin.image._validate_image() - - def test_check_image_file(self): - # test if it passes file & no url - image_file_path = tempfile.mkstemp()[1] - with mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {'data': image_file_path}})): - glance_plugin.image._validate_image() - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {'data': '/test/path'}})) - # test when open file throws IO error - def test_check_image_bad_file(self): - open_name = '%s.open' % __name__ - with mock.patch(open_name, create=True) as mock_open: - mock_open.side_effect = [mock_open(read_data='Data').return_value] - self.assertRaises(NonRecoverableError, - glance_plugin.image._validate_image) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image_url': '?', 'image': {}})) - # test when bad url - def test_check_image_bad_url(self): - http_connection_mock = mock.MagicMock() - http_connection_mock.return_value.getresponse.return_value.status = 400 - with mock.patch('httplib.HTTPConnection', http_connection_mock): - self.assertRaises(NonRecoverableError, - glance_plugin.image._validate_image) - - -class TestValidateProperties(unittest.TestCase): - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {'container_format': 'bare'}})) - def test_check_image_container_format_no_disk_format(self): - # Test if it throws exception no file & no url - self.assertRaises(NonRecoverableError, - image._validate_image_dictionary) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {'disk_format': 'qcow2'}})) - def test_check_image_no_container_format_disk_format(self): - # Test if it throws exception no container_format & disk_format - self.assertRaises(NonRecoverableError, - image._validate_image_dictionary) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock({'image': {}})) - def test_check_image_no_container_format_no_disk_format(self): - # Test if it throws exception no container_format & no disk_format - self.assertRaises(NonRecoverableError, - image._validate_image_dictionary) - - @mock.patch('glance_plugin.image.ctx', - ctx_mock( - {'image': - {'container_format': 'bare', - 'disk_format': 'qcow2'}})) - def test_check_image_container_format_disk_format(self): - # Test if it do not throw exception container_format & disk_format - 
image._validate_image_dictionary() - - -class TestStartImage(unittest.TestCase): - blueprint_path = os.path.join('resources', - 'test-image-start.yaml') - - @mock.patch('glance_plugin.image.create') - @workflow_test(blueprint_path, copy_plugin_yaml=True) - def test_image_lifecycle_start(self, cfy_local, *_): - test_vars = { - 'counter': 0, - 'image': mock.MagicMock() - } - - def _mock_get_image_by_ctx(*_): - i = test_vars['image'] - if test_vars['counter'] == 0: - i.status = 'different image status' - else: - i.status = glance_plugin.image.IMAGE_STATUS_ACTIVE - test_vars['counter'] += 1 - return i - - with mock.patch('openstack_plugin_common.GlanceClient'): - with mock.patch('glance_plugin.image._get_image_by_ctx', - side_effect=_mock_get_image_by_ctx): - cfy_local.execute('install', task_retries=3) - - self.assertEqual(2, test_vars['counter']) - self.assertEqual(0, test_vars['image'].start.call_count) diff --git a/keystone_plugin/__init__.py b/keystone_plugin/__init__.py deleted file mode 100644 index 809f033a..00000000 --- a/keystone_plugin/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. diff --git a/keystone_plugin/project.py b/keystone_plugin/project.py deleted file mode 100644 index 1d0ffc08..00000000 --- a/keystone_plugin/project.py +++ /dev/null @@ -1,210 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
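The keystone project module removed below spreads quota handling across separate Nova, Cinder and Neutron clients, with update_quota special-casing Neutron's dict-style API. For orientation, the same updates expressed against openstacksdk's cloud-layer quota helpers are sketched here; the set_*_quotas method names and the quota keys are assumptions, and the project id is a placeholder.

```python
# Hedged sketch: consolidated project quota updates via openstacksdk's cloud
# layer instead of three per-service clients. Method names and quota keys are
# assumptions; the quota dict shape mirrors the node property used below.
import openstack


def update_project_quotas(conn, project_id, quota):
    if quota.get('nova'):
        conn.set_compute_quotas(project_id, **quota['nova'])
    if quota.get('neutron'):
        conn.set_network_quotas(project_id, **quota['neutron'])
    if quota.get('cinder'):
        conn.set_volume_quotas(project_id, **quota['cinder'])


conn = openstack.connect(cloud='example-cloud')  # placeholder clouds.yaml entry
update_project_quotas(conn, 'example-project-id',
                      {'nova': {'cores': 120}, 'neutron': {'network': 100}})
```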
-from cloudify import ctx - -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError - -from openstack_plugin_common import (with_keystone_client, - with_nova_client, - with_cinder_client, - with_neutron_client, - get_openstack_id, - use_external_resource, - delete_resource_and_runtime_properties, - add_list_to_runtime_properties, - validate_resource, - create_object_dict, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS) - - -PROJECT_OPENSTACK_TYPE = 'project' - -PROJECT_QUOTA_TYPE = 'quota' - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - -NOVA = 'nova' -CINDER = 'cinder' -NEUTRON = 'neutron' - -QUOTA = 'quota' -USERS = 'users' -ROLES = 'roles' - - -@operation -@with_keystone_client -def create(keystone_client, args, **kwargs): - if use_external_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE): - return - - project_dict = create_object_dict(ctx, - PROJECT_OPENSTACK_TYPE, - args, - {'domain': 'default'}) - - project = keystone_client.projects.create(**project_dict) - set_openstack_runtime_properties(ctx, project, PROJECT_OPENSTACK_TYPE) - - -@operation -def start(quota_dict, **kwargs): - users = ctx.node.properties[USERS] - validate_users(users, **kwargs) - assign_users(users, **kwargs) - quota = ctx.node.properties[PROJECT_QUOTA_TYPE] - quota.update(quota_dict) - update_project_quota(quota=quota, **kwargs) - - -@operation -@with_keystone_client -@with_nova_client -@with_cinder_client -@with_neutron_client -def delete(keystone_client, nova_client, cinder_client, - neutron_client, **kwargs): - project_id = get_openstack_id(ctx) - quota = ctx.node.properties[PROJECT_QUOTA_TYPE] - delete_quota(project_id, quota, nova_client, NOVA) - delete_quota(project_id, quota, neutron_client, NEUTRON) - delete_quota(project_id, quota, cinder_client, CINDER) - delete_resource_and_runtime_properties(ctx, keystone_client, - RUNTIME_PROPERTIES_KEYS) - - -@operation -@with_keystone_client -def creation_validation(keystone_client, **kwargs): - validate_resource(ctx, keystone_client, PROJECT_OPENSTACK_TYPE) - - -@with_keystone_client -def assign_users(users, keystone_client, **kwargs): - project_id = get_openstack_id(ctx) - for user in users: - roles = user[ROLES] - u = keystone_client.users.find(name=user['name']) - for role in roles: - r = keystone_client.roles.find(name=role) - keystone_client.roles.grant(user=u.id, - project=project_id, - role=r.id) - ctx.logger.debug("Assigned user {0} to project {1} with role {2}" - .format(u.id, project_id, r.id)) - - -@with_keystone_client -def validate_users(users, keystone_client, **kwargs): - user_names = [user['name'] for user in users] - if len(user_names) > len(set(user_names)): - raise NonRecoverableError('Users are not unique') - - for user_name in user_names: - keystone_client.users.find(name=user_name) - - for user in users: - if len(user[ROLES]) > len(set(user[ROLES])): - msg = 'Roles for user {} are not unique' - raise NonRecoverableError(msg.format(user['name'])) - - role_names = {role for user in users for role in user[ROLES]} - for role_name in role_names: - keystone_client.roles.find(name=role_name) - - -def get_quota(tenant_id, client, what_quota): - if what_quota == NEUTRON: - quota = dict(client.show_quota(tenant_id=tenant_id)).get('quota') - else: - quota = client.quotas.get(tenant_id=tenant_id).to_dict() - - ctx.logger.debug( - 'Got {0} quota: {1}'.format(what_quota, str(quota))) - - return quota - - -def update_quota(tenant_id, quota, client, what_quota): - updated_quota 
= quota.get(what_quota) - if updated_quota: - if what_quota == NEUTRON: - new_quota = client.update_quota(tenant_id=tenant_id, - body={QUOTA: updated_quota}) - else: - new_quota = client.quotas.update(tenant_id=tenant_id, - **updated_quota) - ctx.logger.debug( - 'Updated {0} quota: {1}'.format(what_quota, str(new_quota))) - - -def delete_quota(project_id, quota, client, what_quota): - deleting_quota = quota.get(what_quota) - if deleting_quota: - if what_quota == NEUTRON: - client.delete_quota(tenant_id=project_id) - else: - client.quotas.delete(tenant_id=project_id) - ctx.logger.debug( - 'Deleted {0} quota'.format(what_quota)) - - -@with_nova_client -@with_neutron_client -@with_cinder_client -def update_project_quota(nova_client, - cinder_client, - neutron_client, - quota, - **kwargs): - project_id = get_openstack_id(ctx) - update_quota(project_id, quota, nova_client, NOVA) - update_quota(project_id, quota, neutron_client, NEUTRON) - update_quota(project_id, quota, cinder_client, CINDER) - - -@with_keystone_client -def list_projects(keystone_client, args, **kwargs): - projects_list = keystone_client.projects.list(**args) - add_list_to_runtime_properties(ctx, PROJECT_OPENSTACK_TYPE, projects_list) - - -@with_nova_client -@with_neutron_client -@with_cinder_client -def get_project_quota(nova_client, - cinder_client, - neutron_client, - **kwargs): - project_id = get_openstack_id(ctx) - quota = ctx.instance.runtime_properties.get(QUOTA, {}) - quota[NOVA] = get_quota(project_id, nova_client, NOVA) - quota[NEUTRON] = get_quota(project_id, neutron_client, NEUTRON) - quota[CINDER] = get_quota(project_id, cinder_client, CINDER) - ctx.instance.runtime_properties[QUOTA] = quota - - -@with_keystone_client -def update_project(keystone_client, args, **kwargs): - - project_dict = create_object_dict(ctx, - PROJECT_OPENSTACK_TYPE, - args, - {'domain': 'default'}) - project_dict[PROJECT_OPENSTACK_TYPE] = get_openstack_id(ctx) - project = keystone_client.projects.update(**project_dict) - set_openstack_runtime_properties(ctx, project, PROJECT_OPENSTACK_TYPE) diff --git a/keystone_plugin/tests/test_project.py b/keystone_plugin/tests/test_project.py deleted file mode 100644 index db3eadfe..00000000 --- a/keystone_plugin/tests/test_project.py +++ /dev/null @@ -1,295 +0,0 @@ -import mock -import unittest - -from cloudify.context import NODE_INSTANCE -from cloudify.exceptions import NonRecoverableError -from cloudify.context import BootstrapContext -from cloudify.state import current_ctx -import openstack_plugin_common.tests.test as common_test - -from cloudify.mocks import ( - MockContext, - MockNodeInstanceContext, - MockNodeContext -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_TYPE_PROPERTY -) -from keystone_plugin.project import ( - PROJECT_OPENSTACK_TYPE, - QUOTA -) -import keystone_plugin - - -class TestProject(unittest.TestCase): - - test_id = 'test-id' - test_name = 'test-name' - updated_name = 'updated-name' - test_deployment_id = 'test-deployment-id' - test_user = 'test-user' - test_role = 'test-role' - - class MockProjectOS: - def __init__(self, id, name): - self._id = id - self._name = name - self._users = {} - - @property - def id(self): - return self._id - - @property - def name(self): - return self._name - - def find(self, *_, **__): - return mock.MagicMock(id='test-role') - - def grant(self, role, user, *_, **__): - self._users[user] = role - - def to_dict(self): - return {'name': self.name, 'id': self.id} - - def mock_keystone_client(self, 
mock_project): - keystone_client = mock.MagicMock() - keystone_client.projects.create.return_value = mock_project - keystone_client.projects.list.return_value = \ - {'projects': [mock_project]} - keystone_client.users.find.return_value = mock.MagicMock( - id=self.test_user) - keystone_client.projects.update.return_value = self.MockProjectOS( - self.id, self.updated_name) - keystone_client.roles = mock_project - return keystone_client - - def mock_ctx(self, test_vars, test_id, - test_deployment_id, runtime_properties=None): - ctx = MockContext() - ctx.node = MockNodeContext(properties=test_vars) - ctx.bootstrap_context = BootstrapContext( - common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX[0]) - ctx.instance = MockNodeInstanceContext( - id=test_id, runtime_properties=runtime_properties or {}) - ctx.deployment = mock.Mock() - ctx.deployment.id = test_deployment_id - ctx.type = NODE_INSTANCE - ctx.logger = mock.Mock() - current_ctx.set(ctx) - return ctx - - @mock.patch('openstack_plugin_common._handle_kw', - autospec=True, return_value=None) - def test_keystone_project_create_and_delete(self, *_): - quota = {'nova': {'cpu': 120}, - 'neutron': {'networks': 100}} - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': quota, - 'users': {} - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - keystone_plugin.project.ctx = ctx - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_project) - keystone_plugin.project.create(keystone_client, {}) - self.assertEqual(self.test_name, - ctx.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY]) - self.assertEqual(self.test_id, - ctx.instance.runtime_properties[ - OPENSTACK_ID_PROPERTY]) - self.assertEqual(PROJECT_OPENSTACK_TYPE, - ctx.instance.runtime_properties[ - OPENSTACK_TYPE_PROPERTY]) - - keystone_plugin.project.delete( - keystone_client=keystone_client, # keystone_client - nova_client=mock.MagicMock(), # nova_client - cinder_client=mock.MagicMock(), # cinder_client - neutron_client=mock.MagicMock()) # neutron_client - self.assertNotIn(OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties) - self.assertNotIn(OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties) - self.assertNotIn(OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties) - - def test_assign_user(self, *_): - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role]}] - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_project) - keystone_plugin.project.ctx = ctx - keystone_plugin.project.start( - {}, - keystone_client=keystone_client, # keystone_client - nova_client=mock.MagicMock(), # nova_client - cinder_client=mock.MagicMock(), # cinder_client - neutron_client=mock.MagicMock()) # neutron_client - self.assertEqual({self.test_user: self.test_role}, - mock_project._users) - - def test_assign_users_not_unique(self, *_): - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role]}, - {'name': self.test_user, - 'roles': [self.test_role]}] - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = 
self.mock_keystone_client(mock_project) - keystone_plugin.project.ctx = ctx - with self.assertRaises(NonRecoverableError): - keystone_plugin.project.start( - {}, - keystone_client=keystone_client, # keystone_client - nova_client=mock.MagicMock(), # nova_client - cinder_client=mock.MagicMock(), # cinder_client - neutron_client=mock.MagicMock()) # neutron_client - - def test_assign_user_roles_not_unique(self, *_): - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role, self.test_role]}] - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_project) - keystone_plugin.project.ctx = ctx - with self.assertRaises(NonRecoverableError): - keystone_plugin.project.start( - {}, - keystone_client=keystone_client, # keystone_client - nova_client=mock.MagicMock(), # nova_client - cinder_client=mock.MagicMock(), # cinder_client - neutron_client=mock.MagicMock()) # neutron_client - - def test_update_project(self, *_): - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role]}] - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_project) - keystone_plugin.project.ctx = ctx - keystone_plugin.project.update_project(args={}, - keystone_client=keystone_client) - self.assertEqual(self.updated_name, - ctx.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY]) - - def test_list_projects(self, *_): - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role]}] - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_project = self.MockProjectOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_project) - keystone_plugin.project.ctx = ctx - keystone_plugin.project.list_projects(args={}, - keystone_client=keystone_client) - project_list = PROJECT_OPENSTACK_TYPE + '_list' - self.assertIn(project_list, ctx.instance.runtime_properties) - self.assertEqual(1, - len(ctx.instance.runtime_properties[project_list])) - - def test_get_quota(self, *_): - nova_quota = {'cpu': 120} - cinder_quota = {'volumes': 30} - neutron_quota = {'networks': 100} - - quota = { - 'nova': nova_quota, - 'neutron': neutron_quota, - 'cinder': cinder_quota - } - - test_vars = { - 'project': {}, - 'resource_id': '', - 'quota': {}, - 'users': [{'name': self.test_user, - 'roles': [self.test_role]}] - } - - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - keystone_plugin.project.ctx = ctx - - nova_quota_response_mock = mock.MagicMock() - nova_quota_response_mock.to_dict = mock.MagicMock( - return_value=nova_quota - ) - nova_client = mock.MagicMock() - nova_client.quotas.get = mock.MagicMock( - return_value=nova_quota_response_mock - ) - - cinder_quota_response_mock = mock.MagicMock() - cinder_quota_response_mock.to_dict = mock.MagicMock( - return_value=cinder_quota - ) - cinder_client = mock.MagicMock() - cinder_client.quotas.get = mock.MagicMock( - return_value=cinder_quota_response_mock - ) - - 
neutron_client = mock.MagicMock() - neutron_client.show_quota = mock.MagicMock( - return_value={'quota': neutron_quota} - # format of neutron client 'show_quota' response - ) - - keystone_plugin.project.get_project_quota( - nova_client=nova_client, - cinder_client=cinder_client, - neutron_client=neutron_client) - - self.assertIn(QUOTA, ctx.instance.runtime_properties) - self.assertDictEqual(quota, ctx.instance.runtime_properties[QUOTA]) diff --git a/keystone_plugin/tests/test_user.py b/keystone_plugin/tests/test_user.py deleted file mode 100644 index 4e7d89d2..00000000 --- a/keystone_plugin/tests/test_user.py +++ /dev/null @@ -1,136 +0,0 @@ -import mock -import unittest - -from cloudify.context import NODE_INSTANCE -from cloudify.context import BootstrapContext -from cloudify.state import current_ctx -import openstack_plugin_common.tests.test as common_test - -from cloudify.mocks import ( - MockContext, - MockNodeInstanceContext, - MockNodeContext -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_TYPE_PROPERTY - ) -from keystone_plugin.user import USER_OPENSTACK_TYPE -import keystone_plugin - - -class TestUser(unittest.TestCase): - - test_id = 'test-id' - test_name = 'test-name' - updated_name = 'updated-name' - test_deployment_id = 'test-deployment-id' - - class MockUserOS: - def __init__(self, id, name): - self._id = id - self._name = name - self._users = {} - - @property - def id(self): - return self._id - - @property - def name(self): - return self._name - - def to_dict(self): - return {'name': self.name, 'id': self.id} - - def mock_keystone_client(self, mock_user): - keystone_client = mock.MagicMock() - keystone_client.users.create.return_value = mock_user - keystone_client.users.list.return_value = [mock_user] - keystone_client.users.find.return_value = mock.MagicMock( - id=self.test_name) - keystone_client.users.update.return_value = self.MockUserOS( - self.id, self.updated_name) - return keystone_client - - def mock_ctx(self, test_vars, test_id, - test_deployment_id, runtime_properties=None): - ctx = MockContext() - ctx.node = MockNodeContext(properties=test_vars) - ctx.bootstrap_context = BootstrapContext( - common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX[0]) - ctx.instance = MockNodeInstanceContext( - id=test_id, runtime_properties=runtime_properties or {}) - ctx.deployment = mock.Mock() - ctx.deployment.id = test_deployment_id - ctx.type = NODE_INSTANCE - ctx.logger = mock.Mock() - current_ctx.set(ctx) - return ctx - - @mock.patch('openstack_plugin_common._handle_kw', - autospec=True, return_value=None) - def test_keystone_user_create_and_delete(self, *_): - test_vars = { - 'user': {}, - 'resource_id': '' - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - keystone_plugin.user.ctx = ctx - mock_user = self.MockUserOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_user) - keystone_plugin.user.create(keystone_client, {}) - self.assertEqual(self.test_name, - ctx.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY]) - self.assertEqual(self.test_id, - ctx.instance.runtime_properties[ - OPENSTACK_ID_PROPERTY]) - self.assertEqual(USER_OPENSTACK_TYPE, - ctx.instance.runtime_properties[ - OPENSTACK_TYPE_PROPERTY]) - - keystone_plugin.user.delete(keystone_client=keystone_client) - self.assertNotIn(OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties) - self.assertNotIn(OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties) - 
self.assertNotIn(OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties) - - def test_update_user(self, *_): - test_vars = { - 'user': {}, - 'resource_id': '' - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_user = self.MockUserOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_user) - keystone_plugin.user.ctx = ctx - keystone_plugin.user.update(args={}, keystone_client=keystone_client) - self.assertEqual(self.updated_name, - ctx.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY]) - - def test_list_users(self, *_): - test_vars = { - 'user': {}, - 'resource_id': '' - } - ctx = self.mock_ctx(test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id}) - mock_user = self.MockUserOS(self.test_id, self.test_name) - keystone_client = self.mock_keystone_client(mock_user) - keystone_plugin.user.ctx = ctx - keystone_plugin.user.list_users(args={}, - keystone_client=keystone_client) - user_list = USER_OPENSTACK_TYPE + '_list' - self.assertIn(user_list, ctx.instance.runtime_properties) - self.assertEqual(1, len(ctx.instance.runtime_properties[user_list])) diff --git a/keystone_plugin/user.py b/keystone_plugin/user.py deleted file mode 100644 index 0ac3ccd0..00000000 --- a/keystone_plugin/user.py +++ /dev/null @@ -1,64 +0,0 @@ -######### -# Copyright (c) 2018 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
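The keystone user module removed below wraps basic user CRUD behind the legacy client helpers. The equivalent calls on an openstacksdk identity proxy are sketched here for comparison; the proxy method names are assumptions about openstacksdk's identity v3 layer, and all user attributes are placeholders.

```python
# Hedged sketch: Keystone user create/update/list/delete through the
# openstacksdk identity proxy, mirroring the operations removed below.
import openstack

conn = openstack.connect(cloud='example-cloud')  # placeholder clouds.yaml entry

user = conn.identity.create_user(name='example-user', password='example-pass')
conn.identity.update_user(user, email='user@example.com')
for existing in conn.identity.users():
    print(existing.name)
conn.identity.delete_user(user, ignore_missing=True)
```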
- -from cloudify import ctx -from cloudify.decorators import operation - -from openstack_plugin_common import (with_keystone_client, - use_external_resource, - delete_resource_and_runtime_properties, - create_object_dict, - get_openstack_id, - add_list_to_runtime_properties, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS) - -USER_OPENSTACK_TYPE = 'user' - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -@operation -@with_keystone_client -def create(keystone_client, args, **kwargs): - if use_external_resource(ctx, keystone_client, USER_OPENSTACK_TYPE): - return - - user_dict = create_object_dict(ctx, USER_OPENSTACK_TYPE, args, {}) - user = keystone_client.users.create(**user_dict) - - set_openstack_runtime_properties(ctx, user, USER_OPENSTACK_TYPE) - - -@operation -@with_keystone_client -def delete(keystone_client, **kwargs): - delete_resource_and_runtime_properties(ctx, keystone_client, - RUNTIME_PROPERTIES_KEYS) - - -@operation -@with_keystone_client -def update(keystone_client, args, **kwargs): - user_dict = create_object_dict(ctx, USER_OPENSTACK_TYPE, args, {}) - user_dict[USER_OPENSTACK_TYPE] = get_openstack_id(ctx) - user = keystone_client.users.update(**user_dict) - set_openstack_runtime_properties(ctx, user, USER_OPENSTACK_TYPE) - - -@with_keystone_client -def list_users(keystone_client, args, **kwargs): - users_list = keystone_client.users.list(**args) - add_list_to_runtime_properties(ctx, USER_OPENSTACK_TYPE, users_list) diff --git a/neutron_plugin/__init__.py b/neutron_plugin/__init__.py deleted file mode 100644 index 04cb21f7..00000000 --- a/neutron_plugin/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'idanmo' diff --git a/neutron_plugin/floatingip.py b/neutron_plugin/floatingip.py deleted file mode 100644 index d69ea13d..00000000 --- a/neutron_plugin/floatingip.py +++ /dev/null @@ -1,143 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
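The floating IP module removed below resolves the floating network from, in order: an explicit floating_network_id, a floating_network_name, a connected network node, and finally the provider context. That precedence is restated in the standalone helper sketched here; every argument name is hypothetical, and resolve_name stands in for whatever name-to-id lookup the caller has. With openstacksdk, the creation step itself reduces to a single conn.network.create_ip(floating_network_id=...) call.

```python
# Illustrative restatement of the floating-network selection order used by
# the removed create() operation below. All argument names are hypothetical;
# resolve_name is any callable that maps a network name to its id.
def pick_floating_network_id(config, network_from_rel=None,
                             provider_network_id=None, resolve_name=None):
    if config.get('floating_network_id'):
        return config['floating_network_id']
    if config.get('floating_network_name') and resolve_name:
        return resolve_name(config['floating_network_name'])
    if network_from_rel:
        return network_from_rel
    if provider_network_id:
        return provider_network_id
    raise ValueError('No floating network could be determined')
```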
- -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -from openstack_plugin_common import ( - with_neutron_client, - provider, - get_openstack_id, - add_list_to_runtime_properties, - is_external_relationship, - is_external_relationship_not_conditionally_created, - get_openstack_id_of_single_connected_node_by_openstack_type, -) -from openstack_plugin_common.floatingip import ( - use_external_floatingip, - set_floatingip_runtime_properties, - delete_floatingip, - floatingip_creation_validation -) -from network import NETWORK_OPENSTACK_TYPE - -FLOATINGIP_OPENSTACK_TYPE = 'floatingip' -FLOATING_NETWORK_ERROR_PREFIX = \ - 'Network name must be specified by either a floating_network_name, a ' \ - 'floating_network_id, or a relationship to a Network node template ' -FLOATING_NETWORK_ERROR_SUFFIX = \ - '(provided: network from relationships={}, floatingip={})' -FLOATING_NETWORK_ERROR_MSG = FLOATING_NETWORK_ERROR_PREFIX +\ - FLOATING_NETWORK_ERROR_SUFFIX - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - - if use_external_floatingip(neutron_client, 'floating_ip_address', - lambda ext_fip: ext_fip['floating_ip_address']): - return - - floatingip = { - # No defaults - } - floatingip.update(ctx.node.properties[FLOATINGIP_OPENSTACK_TYPE], **args) - - network_from_rel = \ - get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, True) - - if 'floating_network_id' in floatingip: - ctx.logger.debug( - 'Using floating ip network {0}.'.format( - floatingip['floating_network_id'])) - elif 'floating_network_name' in floatingip: - floatingip['floating_network_id'] = neutron_client.cosmo_get_named( - 'network', floatingip['floating_network_name'])['id'] - ctx.logger.debug( - 'Using floating ip network {0} from name {1} provided.'.format( - floatingip['floating_network_id'], - floatingip['floating_network_name'])) - del floatingip['floating_network_name'] - elif network_from_rel: - floatingip['floating_network_id'] = network_from_rel - ctx.logger.debug( - 'Using floating ip network {0} from relationship.'.format( - floatingip['floating_network_id'])) - else: - provider_context = provider(ctx) - ext_network = provider_context.ext_network - if ext_network: - floatingip['floating_network_id'] = ext_network['id'] - ctx.logger.debug( - 'Using floating ip network {0} from provider context.'.format( - floatingip['floating_network_id'])) - else: - raise NonRecoverableError(FLOATING_NETWORK_ERROR_MSG.format( - None, None)) - - fip = neutron_client.create_floatingip( - {FLOATINGIP_OPENSTACK_TYPE: floatingip})[FLOATINGIP_OPENSTACK_TYPE] - set_floatingip_runtime_properties(fip['id'], fip['floating_ip_address']) - - ctx.logger.info('Floating IP creation response: {0}'.format(fip)) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_floatingip(neutron_client) - - -@with_neutron_client -def list_floatingips(neutron_client, args, **kwargs): - fip_list = neutron_client.list_floatingips(**args) - add_list_to_runtime_properties(ctx, - FLOATINGIP_OPENSTACK_TYPE, - fip_list.get('floatingips', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, **kwargs): - floatingip_creation_validation(neutron_client, 'floating_ip_address') - - -@operation -@with_neutron_client -def connect_port(neutron_client, **kwargs): - if is_external_relationship_not_conditionally_created(ctx): - return - - port_id = get_openstack_id(ctx.source) - 
floating_ip_id = get_openstack_id(ctx.target) - fip = {'port_id': port_id} - neutron_client.update_floatingip( - floating_ip_id, {FLOATINGIP_OPENSTACK_TYPE: fip}) - - -@operation -@with_neutron_client -def disconnect_port(neutron_client, **kwargs): - if is_external_relationship(ctx): - ctx.logger.info('Not disassociating floatingip and port since ' - 'external floatingip and port are being used') - return - - floating_ip_id = get_openstack_id(ctx.target) - fip = {'port_id': None} - neutron_client.update_floatingip(floating_ip_id, - {FLOATINGIP_OPENSTACK_TYPE: fip}) diff --git a/neutron_plugin/network.py b/neutron_plugin/network.py deleted file mode 100644 index ad7544d8..00000000 --- a/neutron_plugin/network.py +++ /dev/null @@ -1,111 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -from openstack_plugin_common import ( - with_neutron_client, - is_external_resource, - is_external_resource_not_conditionally_created, - delete_resource_and_runtime_properties, - use_external_resource, - validate_resource, - create_object_dict, - get_openstack_id, - set_neutron_runtime_properties, - add_list_to_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -NETWORK_OPENSTACK_TYPE = 'network' -ADMIN_STATE_UP = 'admin_state_up' - -# Runtime properties -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - - if use_external_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE): - return - network = create_object_dict(ctx, - NETWORK_OPENSTACK_TYPE, - args, - {ADMIN_STATE_UP: True}) - - net = neutron_client.create_network( - {NETWORK_OPENSTACK_TYPE: network})[NETWORK_OPENSTACK_TYPE] - set_neutron_runtime_properties(ctx, net, NETWORK_OPENSTACK_TYPE) - - -@operation -@with_neutron_client -def start(neutron_client, **kwargs): - network_id = get_openstack_id(ctx) - - if is_external_resource_not_conditionally_created(ctx): - ctx.logger.info('Validating external network is started') - if not neutron_client.show_network( - network_id)[NETWORK_OPENSTACK_TYPE][ADMIN_STATE_UP]: - raise NonRecoverableError( - 'Expected external resource network {0} to be in ' - '"admin_state_up"=True'.format(network_id)) - return - - neutron_client.update_network( - network_id, { - NETWORK_OPENSTACK_TYPE: { - ADMIN_STATE_UP: True - } - }) - - -@operation -@with_neutron_client -def stop(neutron_client, **kwargs): - if is_external_resource(ctx): - ctx.logger.info('Not stopping network since an external network is ' - 'being used') - return - - neutron_client.update_network(get_openstack_id(ctx), { - NETWORK_OPENSTACK_TYPE: { - ADMIN_STATE_UP: False - } - }) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_resource_and_runtime_properties(ctx, neutron_client, - RUNTIME_PROPERTIES_KEYS) - - 
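In the removed network module above, start and stop simply map the lifecycle onto Neutron's admin_state_up flag (and start additionally validates that an external network is already up). The same toggle against the openstacksdk network proxy is sketched here; is_admin_state_up is the SDK-side spelling of that flag and the network name is a placeholder.

```python
# Hedged sketch: toggling a network's admin_state_up flag through the
# openstacksdk network proxy, mirroring the removed start/stop operations.
import openstack

conn = openstack.connect(cloud='example-cloud')          # placeholder entry
network = conn.network.find_network('example-network')   # returns None if absent

if network is not None:
    conn.network.update_network(network, is_admin_state_up=True)   # "start"
    conn.network.update_network(network, is_admin_state_up=False)  # "stop"
```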
-@with_neutron_client -def list_networks(neutron_client, args, **kwargs): - net_list = neutron_client.list_networks(**args) - add_list_to_runtime_properties(ctx, - NETWORK_OPENSTACK_TYPE, - net_list.get('networks', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, **kwargs): - validate_resource(ctx, neutron_client, NETWORK_OPENSTACK_TYPE) diff --git a/neutron_plugin/port.py b/neutron_plugin/port.py deleted file mode 100644 index c693ac23..00000000 --- a/neutron_plugin/port.py +++ /dev/null @@ -1,480 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import netaddr - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError - -import neutronclient.common.exceptions as neutron_exceptions - -from openstack_plugin_common import ( - with_neutron_client, - with_nova_client, - get_openstack_id_of_single_connected_node_by_openstack_type, - get_openstack_ids_of_connected_nodes_by_openstack_type, - delete_resource_and_runtime_properties, - delete_runtime_properties, - use_external_resource, - is_external_relationship, - add_list_to_runtime_properties, - validate_resource, - get_openstack_id, - set_neutron_runtime_properties, - create_object_dict, - COMMON_RUNTIME_PROPERTIES_KEYS, - OPENSTACK_ID_PROPERTY, - is_external_relationship_not_conditionally_created) - -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE -from neutron_plugin.subnet import SUBNET_OPENSTACK_TYPE -from neutron_plugin.security_group import SG_OPENSTACK_TYPE -from openstack_plugin_common.floatingip import get_server_floating_ip - -PORT_OPENSTACK_TYPE = 'port' -PORT_ALLOWED_ADDRESS = 'allowed_address_pairs' -PORT_ADDRESS_REL_TYPE = 'cloudify.openstack.port_connected_to_floating_ip' - -# Runtime properties -FIXED_IP_ADDRESS_PROPERTY = 'fixed_ip_address' # the fixed ip address -MAC_ADDRESS_PROPERTY = 'mac_address' # the mac address -RUNTIME_PROPERTIES_KEYS = \ - COMMON_RUNTIME_PROPERTIES_KEYS + [FIXED_IP_ADDRESS_PROPERTY, - MAC_ADDRESS_PROPERTY] - -NO_SG_PORT_CONNECTION_RETRY_INTERVAL = 3 - - -def _port_update(neutron_client, port_id, args, ext_port): - runtime_properties = ctx.instance.runtime_properties - updated_params = create_object_dict(ctx, PORT_OPENSTACK_TYPE, args, {}) - if updated_params: - if PORT_ALLOWED_ADDRESS in updated_params: - allowed_addpairs = ext_port.get(PORT_ALLOWED_ADDRESS, []) - for allowed_addpair in updated_params[PORT_ALLOWED_ADDRESS]: - for old_addpair in allowed_addpairs: - old_ip = old_addpair.get('ip_address') - if old_ip == allowed_addpair.get('ip_address'): - raise NonRecoverableError( - 'Ip {} is already assigned to {}.' 
- .format(old_ip, port_id)) - else: - allowed_addpairs.append(allowed_addpair) - - change = { - PORT_OPENSTACK_TYPE: { - PORT_ALLOWED_ADDRESS: allowed_addpairs - } - } - neutron_client.update_port(port_id, change) - runtime_properties[PORT_ALLOWED_ADDRESS] = allowed_addpairs - ctx.logger.info("Applied: {}".format(repr(change))) - # update network id - net_id = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, True) - if net_id: - if neutron_client.show_port( - port_id)[PORT_OPENSTACK_TYPE]['network_id'] != net_id: - raise NonRecoverableError( - 'Expected external resources port {0} and network {1} ' - 'to be connected'.format(port_id, net_id)) - # update port ip and mac - runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = _get_fixed_ip(ext_port) - runtime_properties[MAC_ADDRESS_PROPERTY] = ext_port['mac_address'] - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - - ext_port = use_external_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE) - if ext_port: - try: - port_id = get_openstack_id(ctx) - _port_update(neutron_client, port_id, args, ext_port) - return - except Exception: - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - raise - - net_id = ctx.node.properties.get( - PORT_OPENSTACK_TYPE, {}).get('network_id') - if not net_id: - net_id = \ - get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - - port = create_object_dict(ctx, - PORT_OPENSTACK_TYPE, - args, - {'network_id': net_id}) - _handle_fixed_ips(port, neutron_client) - _handle_security_groups(port) - - p = neutron_client.create_port( - {PORT_OPENSTACK_TYPE: port})[PORT_OPENSTACK_TYPE] - - set_neutron_runtime_properties(ctx, p, PORT_OPENSTACK_TYPE) - ctx.instance.runtime_properties[FIXED_IP_ADDRESS_PROPERTY] = \ - _get_fixed_ip(p) - ctx.instance.runtime_properties[MAC_ADDRESS_PROPERTY] = p['mac_address'] - - -@operation -@with_nova_client -@with_neutron_client -def attach(nova_client, neutron_client, **kwargs): - - if is_external_relationship(ctx): - ctx.logger.info('Not attaching port from server since ' - 'external port and server are being used') - return - - server_id = get_openstack_id(ctx.source) - port_id = get_openstack_id(ctx.target) - port = neutron_client.show_port(port_id) - server = nova_client.servers.get(server_id) - network = neutron_client.show_network(port['port']['network_id']) - network_name = network['network']['name'] - - floating_ip_address = None - for target in ctx.target.instance.relationships: - if target.type == PORT_ADDRESS_REL_TYPE: - target_instance = target.target.instance - floatingip_id = \ - target_instance.runtime_properties[OPENSTACK_ID_PROPERTY] - floating_ip = neutron_client.show_floatingip(floatingip_id) - floating_ip_address = \ - floating_ip['floatingip']['floating_ip_address'] - - server_addresses = \ - [addr['addr'] for addr in server.addresses[network_name]] - - if floating_ip_address and floating_ip_address not in server_addresses: - ctx.logger.info('We will attach floating ip {0} to server {1}' - .format(floating_ip_address, server_id)) - server.add_floating_ip(floating_ip_address) - return ctx.operation.retry( - message='Waiting for the floating ip {0} to ' - 'attach to server {1}..' 
- .format(floating_ip_address, - server_id), - retry_after=10) - change = { - PORT_OPENSTACK_TYPE: { - 'device_id': server_id, - } - } - device_id = port['port'].get('device_id') - if not device_id or device_id != server_id: - ctx.logger.info('Attaching port {0}...'.format(port_id)) - neutron_client.update_port(port_id, change) - ctx.logger.info('Successfully attached port {0}'.format(port_id)) - else: - ctx.logger.info( - 'Skipping port {0} attachment, ' - 'because it is already attached ' - 'to device (server) id {1}.'.format(port_id, device_id)) - - -def _port_delete(neutron_client, port_id, ext_port): - updated_params = ctx.node.properties.get(PORT_OPENSTACK_TYPE) - if updated_params: - if PORT_ALLOWED_ADDRESS in updated_params: - ips_for_remove = [] - updated_pairs = [] - allowed_addpairs = ext_port.get(PORT_ALLOWED_ADDRESS, []) - # ip's for remove - for allowed_addpair in updated_params[PORT_ALLOWED_ADDRESS]: - ips_for_remove.append(allowed_addpair.get('ip_address')) - # cleanup ip's - for old_addpair in allowed_addpairs: - old_ip = old_addpair.get('ip_address') - if old_ip not in ips_for_remove: - updated_pairs.append(old_addpair) - # apply changes - change = { - PORT_OPENSTACK_TYPE: { - PORT_ALLOWED_ADDRESS: updated_pairs - } - } - neutron_client.update_port(port_id, change) - ctx.logger.info("Applied on remove: {}".format(repr(change))) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - try: - # clean up external resource - ext_port = use_external_resource(ctx, neutron_client, - PORT_OPENSTACK_TYPE) - if ext_port: - port_id = get_openstack_id(ctx) - _port_delete(neutron_client, port_id, ext_port) - # remove port if need - delete_resource_and_runtime_properties(ctx, neutron_client, - RUNTIME_PROPERTIES_KEYS) - except neutron_exceptions.NeutronClientException as e: - if e.status_code == 404: - # port was probably deleted when an attached device was deleted - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - else: - raise - - -@operation -@with_nova_client -@with_neutron_client -def detach(nova_client, neutron_client, **kwargs): - - if is_external_relationship(ctx): - ctx.logger.info('Not detaching port from server since ' - 'external port and server are being used') - return - - port_id = get_openstack_id(ctx.target) - server_id = get_openstack_id(ctx.source) - - server_floating_ip = get_server_floating_ip(neutron_client, server_id) - if server_floating_ip: - ctx.logger.info('We have floating ip {0} attached to server' - .format(server_floating_ip['floating_ip_address'])) - server = nova_client.servers.get(server_id) - try: - server.remove_floating_ip( - server_floating_ip['floating_ip_address']) - except AttributeError: - # To support version mismatch. - neutron_client.update_floatingip( - server_floating_ip['id'], {'floatingip': {'port_id': None}}) - return ctx.operation.retry( - message='Waiting for the floating ip {0} to ' - 'detach from server {1}..' 
- .format(server_floating_ip['floating_ip_address'], - server_id), - retry_after=10) - change = { - PORT_OPENSTACK_TYPE: { - 'device_id': '', - 'device_owner': '' - } - } - ctx.logger.info('Detaching port {0}...'.format(port_id)) - neutron_client.update_port(port_id, change) - ctx.logger.info('Successfully detached port {0}'.format(port_id)) - - -@operation -@with_neutron_client -def connect_security_group(neutron_client, **kwargs): - port_id = get_openstack_id(ctx.source) - security_group_id = get_openstack_id(ctx.target) - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info('Validating external port and security-group are ' - 'connected') - if any(sg for sg in neutron_client.show_port(port_id)['port'].get( - 'security_groups', []) if sg == security_group_id): - return - raise NonRecoverableError( - 'Expected external resources port {0} and security-group {1} to ' - 'be connected'.format(port_id, security_group_id)) - - # WARNING: non-atomic operation - port = neutron_client.cosmo_get(PORT_OPENSTACK_TYPE, id=port_id) - ctx.logger.info( - "connect_security_group(): source_id={0} target={1}".format( - port_id, ctx.target.instance.runtime_properties)) - # We could just pass the port['security_groups'] - # dict here with a new element, however we need to test - # a race condition in Openstack so we need to copy the security - # group list. - sgs = port['security_groups'][:] - if security_group_id not in port['security_groups']: - sgs.append(security_group_id) - neutron_client.update_port(port_id, - {PORT_OPENSTACK_TYPE: {'security_groups': sgs}}) - - # Double check if SG has been actually updated (a race-condition - # in OpenStack): - port_info = neutron_client.show_port(port_id)['port'] - port_security_groups = port_info.get('security_groups', []) - if security_group_id not in port_security_groups: - return ctx.operation.retry( - message='Security group connection (`{0}\' -> `{1}\')' - ' has not been established!'.format(port_id, - security_group_id), - retry_after=NO_SG_PORT_CONNECTION_RETRY_INTERVAL - ) - - -@operation -@with_neutron_client -def disconnect_security_group(neutron_client, **kwargs): - port_id = get_openstack_id(ctx.source) - security_group_id = get_openstack_id(ctx.target) - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info( - 'Port {0} and Security Group {1} are external resources. 
' - 'Not performing disconnect.') - return - - port = neutron_client.cosmo_get(PORT_OPENSTACK_TYPE, id=port_id) - sgs = port['security_groups'][:] - if security_group_id not in port['security_groups']: - return - sgs.remove(security_group_id) - neutron_client.update_port(port_id, - {PORT_OPENSTACK_TYPE: {'security_groups': sgs}}) - port_info = neutron_client.show_port(port_id)['port'] - port_security_groups = port_info.get('security_groups', []) - if security_group_id in port_security_groups: - return ctx.operation.retry( - message='Security group connection (`{0}\' -> `{1}\')' - ' has not been established!'.format(port_id, - security_group_id), - retry_after=NO_SG_PORT_CONNECTION_RETRY_INTERVAL - ) - - -@with_neutron_client -def list_ports(neutron_client, args, **kwargs): - port_list = neutron_client.list_ports(**args) - add_list_to_runtime_properties(ctx, - PORT_OPENSTACK_TYPE, - port_list.get('ports', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, **kwargs): - validate_resource(ctx, neutron_client, PORT_OPENSTACK_TYPE) - - -def _get_fixed_ip(port): - # a port may have no fixed IP if it's set on a network without subnets - return port['fixed_ips'][0]['ip_address'] if port['fixed_ips'] else None - - -def _valid_subnet_ip(ip_address, subnet_dict): - """Check if ip_address is valid for subnet_dict['cidr'] - - :param ip_address: string - :param subnet_dict: dict with 'cidr' string - :return: bool - """ - - try: - cidr = subnet_dict.get('subnet', {}).get('cidr') - ctx.logger.debug('Check ip {ip_address} in subnet {cidr}'.format( - ip_address=repr(ip_address), - cidr=repr(cidr))) - if netaddr.IPAddress(ip_address) in netaddr.IPNetwork(cidr): - return True - except TypeError: - pass - return False - - -def _handle_fixed_ips(port, neutron_client): - """Combine IPs and Subnets for the Port fixed IPs list. - - The Port object looks something this: - { - 'port': { - 'id': 'some-id', - 'fixed_ips': [ - {'subnet_id': 'subnet1', 'ip_address': '1.2.3.4'}, - {'ip_address': '1.2.3.5'}, - {'subnet_id': 'subnet3'}, - ] - ...snip... - } - - We need to combine subnets and ips from three sources: - 1) Fixed IPs and Subnets from the Port object. - 2) Subnets from relationships to subnets. - 3) A Fixed IP from node properties. - - There are some issues: - 1) Users can provide both subnets and relationships to subnets. - 2) Recurrences of the subnet_id indicate a desire - for multiple IPs on that subnet. - 3) If we provide a fixed_ip, we don't also know the - target subnet because of how the node properties are. - We should change that. - Have not yet changed that. - But will need to support both paths anyway. - - :param port: An Openstack API Port Object. - :param neutron_client: Openstack Neutron Client. - :return: None - """ - - fixed_ips = port.get('fixed_ips', []) - subnet_ids_from_port = [net.get('subnet_id') for net in fixed_ips] - subnet_ids_from_rels = \ - get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, SUBNET_OPENSTACK_TYPE) - - # Add the subnets from relationships to the port subnets. - for subnet_from_rel in subnet_ids_from_rels: - if subnet_from_rel not in subnet_ids_from_port: - fixed_ips.append({'subnet_id': subnet_from_rel}) - - addresses = [ip.get('ip_address') for ip in fixed_ips] - fixed_ip_from_props = ctx.node.properties['fixed_ip'] - - # If we have a fixed_ip from node props, we need to add it, - # but first try to match it with one of our subnets. 
- # The fixed_ip_element should be one of: - # 1) {'ip_address': 'x.x.x.x'} - # 2) {'subnet_id': '....'} - # 3) {'ip_address': 'x.x.x.x', 'subnet_id': '....'} - # show_subnet returns something like this: - # subnet = { - # 'subnet': { - # 'id': 'subnet1', - # 'cidr': '1.2.3.4/24', - # 'allocation_pools': [], - # ...snip... - # } - # } - if fixed_ip_from_props and not (fixed_ip_from_props in addresses): - fixed_ip_element = {'ip_address': fixed_ip_from_props} - for fixed_ip in fixed_ips: - subnet_id = fixed_ip.get('subnet_id') - if not _valid_subnet_ip( - fixed_ip_from_props, - neutron_client.show_subnet(subnet_id)): - continue - fixed_ip_element['subnet_id'] = subnet_id - del fixed_ips[fixed_ips.index(fixed_ip)] - break - fixed_ips.append(fixed_ip_element) - - # Finally update the object. - if fixed_ips: - port['fixed_ips'] = fixed_ips - - -def _handle_security_groups(port): - security_groups = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, SG_OPENSTACK_TYPE) - if security_groups: - port['security_groups'] = security_groups diff --git a/neutron_plugin/rbac_policy.py b/neutron_plugin/rbac_policy.py deleted file mode 100644 index 9bc07467..00000000 --- a/neutron_plugin/rbac_policy.py +++ /dev/null @@ -1,191 +0,0 @@ -######### -# Copyright (c) 2018 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError - -from openstack_plugin_common import ( - with_neutron_client, - use_external_resource, - create_object_dict, - add_list_to_runtime_properties, - set_neutron_runtime_properties, - get_relationships_by_relationship_type, - delete_resource_and_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY -) - -RBAC_POLICY_OPENSTACK_TYPE = 'rbac_policy' -RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE = \ - 'cloudify.openstack.rbac_policy_applied_to' - - -def find_resource_to_apply_rbac_policy(ctx): - found_relationships = get_relationships_by_relationship_type( - ctx, - RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE - ) - - if len(found_relationships) == 0: - ctx.logger.info( - 'Resource for which RBAC policy may be applied ' - 'not found using {} relationship' - .format(RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE) - ) - - return {} - - if len(found_relationships) > 1: - raise NonRecoverableError( - 'Multiple ({0}) resources for which RBAC policy may be applied ' - 'found using relationship {1}' - .format( - len(found_relationships), - RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE - ) - ) - - found_resource = found_relationships[0].target.instance - ctx.logger.info( - '{0} resource for which RBAC policy may be applied ' - 'found using {1} relationship)' - .format(found_resource, RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE) - ) - - id = found_resource.runtime_properties.get(OPENSTACK_ID_PROPERTY, None) - type = found_resource.runtime_properties.get( - OPENSTACK_TYPE_PROPERTY, - None - ) - - if not id or not type: - ctx.logger.warn( - 'Found using relationship resource has not defined either ' - '"id" or "type" runtime_property. Skipping.' - ) - - return {} - - return { - 'object_type': type, - 'object_id': id - } - - -def validate_found_resource(input_dict, found_resource): - if found_resource: - for key in found_resource.keys(): - if key in input_dict and input_dict.get(key): - raise NonRecoverableError( - 'Multiple definitions of resource for which ' - 'RBAC policy should be applied. ' - 'You specified it both using properties / operation ' - 'inputs and relationship.' 
- ) - - -def create_rbac_policy_object_dict(ctx, args): - found_resource = find_resource_to_apply_rbac_policy(ctx) - validate_found_resource( - ctx.node.properties.get(RBAC_POLICY_OPENSTACK_TYPE, {}), - found_resource - ) - - validate_found_resource( - args.get(RBAC_POLICY_OPENSTACK_TYPE, {}), - found_resource - ) - - rbac_policy = create_object_dict( - ctx, - RBAC_POLICY_OPENSTACK_TYPE, - args, - found_resource - ) - - return rbac_policy - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - if use_external_resource(ctx, neutron_client, RBAC_POLICY_OPENSTACK_TYPE): - return - - rbac_policy_raw = create_rbac_policy_object_dict(ctx, args) - ctx.logger.info('rbac_policy: {0}'.format(rbac_policy_raw)) - - rbac_policy = rbac_policy_raw.copy() - rbac_policy.pop('name', None) # rbac_policy doesn't accept name parameter - - rp = neutron_client.create_rbac_policy({ - RBAC_POLICY_OPENSTACK_TYPE: rbac_policy - })[RBAC_POLICY_OPENSTACK_TYPE] - rp['name'] = None - - set_neutron_runtime_properties(ctx, rp, RBAC_POLICY_OPENSTACK_TYPE) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_resource_and_runtime_properties( - ctx, - neutron_client, - COMMON_RUNTIME_PROPERTIES_KEYS - ) - - -@operation -@with_neutron_client -def list_rbac_policies(neutron_client, args, **kwargs): - rbac_policies = neutron_client.list_rbac_policies(**args) - add_list_to_runtime_properties( - ctx, - RBAC_POLICY_OPENSTACK_TYPE, - rbac_policies.get('rbac_policies', []) - ) - - -@operation -@with_neutron_client -def find_and_delete(neutron_client, args, **kwargs): - reference_rbac_policy = create_rbac_policy_object_dict(ctx, args) - reference_rbac_policy.pop('name', None) - rbac_policies_list = neutron_client.list_rbac_policies() \ - .get('rbac_policies', []) - - for rbac_policy in rbac_policies_list: - if all( - item in rbac_policy.items() - for item - in reference_rbac_policy.items() - ): - id = rbac_policy['id'] - ctx.logger.info( - 'Found RBAC policy with ID: {0} - deleting ...'.format(id) - ) - - neutron_client.cosmo_delete_resource( - RBAC_POLICY_OPENSTACK_TYPE, - id - ) - - return - - ctx.logger.warn('No suitable RBAC policy found') diff --git a/neutron_plugin/router.py b/neutron_plugin/router.py deleted file mode 100644 index 3e0307f6..00000000 --- a/neutron_plugin/router.py +++ /dev/null @@ -1,448 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -import warnings - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -try: - from cloudify.context import RELATIONSHIP_INSTANCE -except ImportError: - from cloudify.constants import ( - RELATIONSHIP_INSTANCE, - ) - -from openstack_plugin_common import ( - provider, - get_openstack_id, - get_openstack_type, - with_neutron_client, - use_external_resource, - is_external_relationship, - is_external_relationship_not_conditionally_created, - is_external_resource_not_conditionally_created, - delete_runtime_properties, - get_relationships_by_relationship_type, - get_openstack_ids_of_connected_nodes_by_openstack_type, - delete_resource_and_runtime_properties, - get_resource_by_name_or_id, - validate_resource, - create_object_dict, - set_neutron_runtime_properties, - add_list_to_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_ID_PROPERTY, -) - -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE -from neutronclient.common.exceptions import NeutronClientException - -ROUTER_OPENSTACK_TYPE = 'router' -ROUTES_OPENSTACK_TYPE = 'routes' -ROUTES_OPENSTACK_NODE_TYPE = 'cloudify.openstack.nodes.Routes' -ROUTES_OPENSTACK_RELATIONSHIP = 'cloudify.openstack.route_connected_to_router' - -# Runtime properties -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -def _update_router_routes(neutron_client, args, **kwargs): - - from copy import deepcopy - - def dict_merge(a, b): - if isinstance(a, list) and isinstance(b, list): - a.extend(b) - return a - if not isinstance(b, dict): - return b - result = deepcopy(a) - for k, v in b.iteritems(): - if k in result: - ctx.logger.info('Match {0}'.format(k)) - result[k] = dict_merge(result[k], v) - ctx.logger.info('Match Routes {0}'.format(k)) - return result - - # Find out if the update script is being called - # from a relationship or a node operation. 
- router = _get_router_from_relationship(neutron_client) - - router_id = router[ROUTER_OPENSTACK_TYPE].pop('id') - new_router = {ROUTER_OPENSTACK_TYPE: {}} - for key, value in args.items(): - new_router['router'][key] = value - - for ro_attribute in ['status', 'tenant_id']: - try: - del router[ROUTER_OPENSTACK_TYPE][ro_attribute] - except KeyError: - pass - - new_router = dict_merge(new_router, router) - return neutron_client.update_router(router_id, new_router) - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - - if use_external_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE): - try: - ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client) - - if ext_net_id_by_rel: - router_id = get_openstack_id(ctx) - - router = neutron_client.show_router(router_id)['router'] - if not (router['external_gateway_info'] and 'network_id' in - router['external_gateway_info'] and - router['external_gateway_info']['network_id'] == - ext_net_id_by_rel): - raise NonRecoverableError( - 'Expected external resources router {0} and ' - 'external network {1} to be connected'.format( - router_id, ext_net_id_by_rel)) - return - except Exception: - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - raise - - router = create_object_dict(ctx, ROUTER_OPENSTACK_TYPE, args, {}) - ctx.logger.info('router: {0}'.format(router)) - - _handle_external_network_config(router, neutron_client) - - r = neutron_client.create_router( - {ROUTER_OPENSTACK_TYPE: router})[ROUTER_OPENSTACK_TYPE] - - set_neutron_runtime_properties(ctx, r, ROUTER_OPENSTACK_TYPE) - - -@operation -@with_neutron_client -def update(neutron_client, args, **kwargs): - if not args: - raise NonRecoverableError( - 'args must be provided to update ' - 'router {0}'.format(kwargs.get('resource_id')) - ) - - router_id = ctx.instance.runtime_properties.get(OPENSTACK_ID_PROPERTY) - if not router_id: - raise NonRecoverableError( - 'Router {0} is missing '.format(OPENSTACK_ID_PROPERTY) - ) - - return neutron_client.update_router(router_id, args) - - -@operation -@with_neutron_client -def update_routes(neutron_client, args, **kwargs): - routes = args.get(ROUTES_OPENSTACK_TYPE) - if not routes: - raise NonRecoverableError('routes param is required and must be ' - 'provided when creating static routes !!') - # Force to pass only the "routes" provided by the node properties - routes_args = {'routes': routes} - - # This will update the router and add new static routes based on the - # routes param provided by the "cloudify.openstack.nodes.Routes" - r = _update_router_routes(neutron_client, routes_args, **kwargs) - router = r.get(ROUTER_OPENSTACK_TYPE) - if r and router: - # If the current context type is a relationship then update the - # source instance "runtime_properties" otherwise just update the - # current instance "runtime_properties" - if ctx.type == RELATIONSHIP_INSTANCE: - ctx.source.instance.\ - runtime_properties[ROUTES_OPENSTACK_TYPE] = routes - else: - ctx.instance.runtime_properties[ROUTES_OPENSTACK_TYPE] = routes - else: - raise NonRecoverableError( - 'Failed while trying to retrieve router instance') - - -@operation -@with_neutron_client -def add_routes(neutron_client, args, **kwargs): - - # Since routes is part of router and not single API resource for routes - # "router" resource is used - router = use_external_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE) - if router: - # Update routes as part of runtime properties - ctx.instance.runtime_properties[ROUTES_OPENSTACK_TYPE]\ - = router[ROUTES_OPENSTACK_TYPE] 
- # Update type to match it as routes types - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]\ - = ROUTES_OPENSTACK_TYPE - return - - routes = ctx.node.properties.get(ROUTES_OPENSTACK_TYPE, {}) - - if not routes: - raise NonRecoverableError('routes param is required and must be ' - 'provided when creating static routes !!') - - # Force to pass only the "routes" provided by the node properties - routes_args = {'routes': routes} - - # This will update the router and add new static routes based on the - # routes param provided by the "cloudify.openstack.nodes.Routes" - r = _update_router_routes(neutron_client, routes_args, **kwargs) - router = r.get(ROUTER_OPENSTACK_TYPE) - if r and router: - set_neutron_runtime_properties(ctx, router, ROUTES_OPENSTACK_TYPE) - ctx.instance.runtime_properties[ROUTES_OPENSTACK_TYPE] = routes - else: - raise NonRecoverableError( - 'Failed while trying to retrieve router instance') - - -@operation -@with_neutron_client -def connect_subnet(neutron_client, **kwargs): - router_id = get_openstack_id(ctx.target) - subnet_id = get_openstack_id(ctx.source) - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info('Validating external subnet and router ' - 'are associated') - for port in neutron_client.list_ports(device_id=router_id)['ports']: - for fixed_ip in port.get('fixed_ips', []): - if fixed_ip.get('subnet_id') == subnet_id: - return - raise NonRecoverableError( - 'Expected external resources router {0} and subnet {1} to be ' - 'connected'.format(router_id, subnet_id)) - - neutron_client.add_interface_router(router_id, {'subnet_id': subnet_id}) - - -@operation -@with_neutron_client -def disconnect_subnet(neutron_client, **kwargs): - if is_external_relationship(ctx): - ctx.logger.info('Not connecting subnet and router since external ' - 'subnet and router are being used') - return - node_routes = ctx.source.instance.runtime_properties.get( - ROUTES_OPENSTACK_TYPE) - - # Only delete routes only if it has "routes" as runtime properties - if node_routes: - _delete_routes(neutron_client) - - neutron_client.remove_interface_router(get_openstack_id(ctx.target), { - 'subnet_id': get_openstack_id(ctx.source) - } - ) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_resource_and_runtime_properties(ctx, neutron_client, - RUNTIME_PROPERTIES_KEYS) - - -@operation -@with_neutron_client -def delete_routes(neutron_client, **kwargs): - - _delete_routes(neutron_client) - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - - -@with_neutron_client -def list_routers(neutron_client, args, **kwargs): - router_list = neutron_client.list_routers(**args) - add_list_to_runtime_properties(ctx, - ROUTER_OPENSTACK_TYPE, - router_list.get('routers', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, **kwargs): - validate_resource(ctx, neutron_client, ROUTER_OPENSTACK_TYPE) - - -def _insert_ext_net_id_to_router_config(ext_net_id, router): - router['external_gateway_info'] = router.get( - 'external_gateway_info', {}) - router['external_gateway_info']['network_id'] = ext_net_id - - -def _handle_external_network_config(router, neutron_client): - # attempting to find an external network for the router to connect to - - # first by either a network name or id passed in explicitly; then by a - # network connected by a relationship; with a final optional fallback to an - # external network set in the Provider-context. 
Otherwise the router will - # simply not get connected to an external network - - provider_context = provider(ctx) - - ext_net_id_by_rel = _get_connected_ext_net_id(neutron_client) - ext_net_by_property = ctx.node.properties['external_network'] - - # the following is meant for backwards compatibility with the - # 'network_name' sugaring - if 'external_gateway_info' in router and 'network_name' in \ - router['external_gateway_info']: - warnings.warn( - 'Passing external "network_name" inside the ' - 'external_gateway_info key of the "router" property is now ' - 'deprecated; Use the "external_network" property instead', - DeprecationWarning) - - ext_net_by_property = router['external_gateway_info']['network_name'] - del (router['external_gateway_info']['network_name']) - - # need to check if the user explicitly passed network_id in the external - # gateway configuration as it affects external network behavior by - # relationship and/or provider context - if 'external_gateway_info' in router and 'network_id' in \ - router['external_gateway_info']: - ext_net_by_property = \ - router['external_gateway_info'].get('network_name') - - if ext_net_by_property and ext_net_id_by_rel: - raise RuntimeError( - "Router can't have an external network connected by both a " - 'relationship and by a network name/id') - - if ext_net_by_property: - ext_net_id = get_resource_by_name_or_id( - ext_net_by_property, NETWORK_OPENSTACK_TYPE, neutron_client)['id'] - _insert_ext_net_id_to_router_config(ext_net_id, router) - elif ext_net_id_by_rel: - _insert_ext_net_id_to_router_config(ext_net_id_by_rel, router) - elif ctx.node.properties['default_to_managers_external_network'] and \ - provider_context.ext_network: - _insert_ext_net_id_to_router_config(provider_context.ext_network['id'], - router) - - -def _check_if_network_is_external(neutron_client, network_id): - return neutron_client.show_network( - network_id)['network']['router:external'] - - -def _get_connected_ext_net_id(neutron_client): - ext_net_ids = \ - [net_id - for net_id in - get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) if - _check_if_network_is_external(neutron_client, net_id)] - - if len(ext_net_ids) > 1: - raise NonRecoverableError( - 'More than one external network is connected to router {0}' - ' by a relationship; External network IDs: {0}'.format( - ext_net_ids)) - - return ext_net_ids[0] if ext_net_ids else None - - -def _get_router_from_relationship(neutron_client): - # Find out if the update script is being called - # from a relationship or a node operation. 
- - # Only get the "router_rel" if it is not a relationship instance - if ctx.type != RELATIONSHIP_INSTANCE: - - router_rel = get_relationships_by_relationship_type( - ctx, ROUTES_OPENSTACK_RELATIONSHIP) - - if router_rel and ROUTER_OPENSTACK_TYPE in get_openstack_type( - router_rel[0].target): - subject = router_rel[0].target - else: - subject = ctx - - elif ctx.type == RELATIONSHIP_INSTANCE: - if ROUTER_OPENSTACK_TYPE in get_openstack_type(ctx.source): - subject = ctx.source - elif ROUTER_OPENSTACK_TYPE in get_openstack_type(ctx.target): - subject = ctx.target - else: - raise NonRecoverableError( - 'Neither target nor source is {0}'.format( - ROUTER_OPENSTACK_TYPE)) - - try: - router = neutron_client.show_router(get_openstack_id(subject)) - except NeutronClientException as e: - raise NonRecoverableError('Error: {0}'.format(str(e))) - if not isinstance(router, dict) or \ - ROUTER_OPENSTACK_TYPE not in router.keys() or \ - 'id' not in router['router'].keys(): - raise NonRecoverableError( - 'API returned unexpected structure.: {0}'.format(router)) - - return router - - -def _prepare_delete_routes_request(neutron_client): - # Empty the "static routes" for the router connected to the routes - - if ctx.type != RELATIONSHIP_INSTANCE: - node_routes =\ - ctx.instance.runtime_properties.get(ROUTES_OPENSTACK_TYPE) - else: - node_routes =\ - ctx.source.instance.runtime_properties.get(ROUTES_OPENSTACK_TYPE) - - if node_routes is None: - raise NonRecoverableError('Unable to get routes from instance !!') - - router = _get_router_from_relationship(neutron_client) - routes = router[ROUTER_OPENSTACK_TYPE].get(ROUTES_OPENSTACK_TYPE) - - new_router = {ROUTER_OPENSTACK_TYPE: {}} - - for index, main_route in enumerate(routes): - for node_route in node_routes: - if main_route == node_route: - del routes[index] - - new_router[ROUTER_OPENSTACK_TYPE]['id'] =\ - router[ROUTER_OPENSTACK_TYPE].get('id') - new_router[ROUTER_OPENSTACK_TYPE]['routes'] = routes - return new_router - - -def _delete_routes(neutron_client): - new_router = _prepare_delete_routes_request(neutron_client) - if new_router and new_router.get(ROUTER_OPENSTACK_TYPE): - router_id = new_router[ROUTER_OPENSTACK_TYPE].pop('id') - else: - raise NonRecoverableError( - 'Failed while trying to retrieve router instance') - - subject = ctx.source if ctx.type == RELATIONSHIP_INSTANCE else ctx - if not is_external_resource_not_conditionally_created(subject): - ctx.logger.info('deleting {0}'.format(ROUTES_OPENSTACK_TYPE)) - neutron_client.update_router(router_id, new_router) - else: - ctx.logger.info('not deleting {0} since an external {0} is ' - 'being used'.format(ROUTES_OPENSTACK_TYPE)) diff --git a/neutron_plugin/security_group.py b/neutron_plugin/security_group.py deleted file mode 100644 index 51849c87..00000000 --- a/neutron_plugin/security_group.py +++ /dev/null @@ -1,141 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -from time import sleep - -from requests.exceptions import RequestException - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -from openstack_plugin_common import ( - transform_resource_name, - with_neutron_client, - delete_resource_and_runtime_properties, - add_list_to_runtime_properties -) -from openstack_plugin_common.security_group import ( - build_sg_data, - process_rules, - use_external_sg, - set_sg_runtime_properties, - delete_sg, - sg_creation_validation, - RUNTIME_PROPERTIES_KEYS -) - -DEFAULT_RULE_VALUES = { - 'direction': 'ingress', - 'ethertype': 'IPv4', - 'port_range_min': 1, - 'port_range_max': 65535, - 'protocol': 'tcp', - 'remote_group_id': None, - 'remote_ip_prefix': '0.0.0.0/0', -} - -SG_OPENSTACK_TYPE = 'security_group' - - -@operation -@with_neutron_client -def create( - neutron_client, args, - status_attempts=10, status_timeout=2, **kwargs -): - - security_group = build_sg_data(args) - if not security_group['description']: - security_group['description'] = ctx.node.properties['description'] - - sg_rules = process_rules(neutron_client, DEFAULT_RULE_VALUES, - 'remote_ip_prefix', 'remote_group_id', - 'port_range_min', 'port_range_max') - - disable_default_egress_rules = ctx.node.properties.get( - 'disable_default_egress_rules') - - if use_external_sg(neutron_client): - return - - transform_resource_name(ctx, security_group) - - sg = neutron_client.create_security_group( - {SG_OPENSTACK_TYPE: security_group})[SG_OPENSTACK_TYPE] - - for attempt in range(max(status_attempts, 1)): - sleep(status_timeout) - try: - neutron_client.show_security_group(sg['id']) - except RequestException as e: - ctx.logger.debug("Waiting for SG to be visible. Attempt {}".format( - attempt)) - else: - break - else: - raise NonRecoverableError( - "Timed out waiting for security_group to exist", e) - - set_sg_runtime_properties(sg, neutron_client) - - try: - if disable_default_egress_rules: - for er in _egress_rules(_rules_for_sg_id(neutron_client, - sg['id'])): - neutron_client.delete_security_group_rule(er['id']) - - for sgr in sg_rules: - sgr['security_group_id'] = sg['id'] - neutron_client.create_security_group_rule( - {'security_group_rule': sgr}) - except Exception: - try: - delete_resource_and_runtime_properties( - ctx, neutron_client, - RUNTIME_PROPERTIES_KEYS) - except Exception as e: - raise NonRecoverableError( - 'Exception while tearing down for retry', e) - raise - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_sg(neutron_client) - - -@with_neutron_client -def list_security_groups(neutron_client, args, **kwargs): - sg_list = neutron_client.list_security_groups(**args) - add_list_to_runtime_properties(ctx, - SG_OPENSTACK_TYPE, - sg_list.get('security_groups', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, **kwargs): - sg_creation_validation(neutron_client, 'remote_ip_prefix') - - -def _egress_rules(rules): - return [rule for rule in rules if rule.get('direction') == 'egress'] - - -def _rules_for_sg_id(neutron_client, id): - rules = neutron_client.list_security_group_rules()['security_group_rules'] - rules = [rule for rule in rules if rule['security_group_id'] == id] - return rules diff --git a/neutron_plugin/subnet.py b/neutron_plugin/subnet.py deleted file mode 100644 index 356916d4..00000000 --- a/neutron_plugin/subnet.py +++ /dev/null @@ -1,106 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. 
All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -from openstack_plugin_common import ( - with_neutron_client, - get_openstack_id_of_single_connected_node_by_openstack_type, - delete_resource_and_runtime_properties, - delete_runtime_properties, - use_external_resource, - validate_resource, - validate_ip_or_range_syntax, - create_object_dict, - get_openstack_id, - set_neutron_runtime_properties, - add_list_to_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE - -SUBNET_OPENSTACK_TYPE = 'subnet' -NETWORK_ID = 'network_id' -CIDR = 'cidr' - -# Runtime properties -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -@operation -@with_neutron_client -def create(neutron_client, args, **kwargs): - - if use_external_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE): - try: - net_id = \ - get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, True) - - if net_id: - subnet_id = get_openstack_id(ctx) - - if neutron_client.show_subnet( - subnet_id)[SUBNET_OPENSTACK_TYPE][NETWORK_ID] \ - != net_id: - raise NonRecoverableError( - 'Expected external resources subnet {0} and network' - ' {1} to be connected'.format(subnet_id, net_id)) - return - except Exception: - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - raise - - net_id = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - subnet = create_object_dict(ctx, - SUBNET_OPENSTACK_TYPE, - args, - {NETWORK_ID: net_id}) - - s = neutron_client.create_subnet( - {SUBNET_OPENSTACK_TYPE: subnet})[SUBNET_OPENSTACK_TYPE] - set_neutron_runtime_properties(ctx, s, SUBNET_OPENSTACK_TYPE) - - -@operation -@with_neutron_client -def delete(neutron_client, **kwargs): - delete_resource_and_runtime_properties(ctx, neutron_client, - RUNTIME_PROPERTIES_KEYS) - - -@with_neutron_client -def list_subnets(neutron_client, args, **kwargs): - subnet_list = neutron_client.list_subnets(**args) - add_list_to_runtime_properties(ctx, - SUBNET_OPENSTACK_TYPE, - subnet_list.get('subnets', [])) - - -@operation -@with_neutron_client -def creation_validation(neutron_client, args, **kwargs): - validate_resource(ctx, neutron_client, SUBNET_OPENSTACK_TYPE) - subnet = dict(ctx.node.properties[SUBNET_OPENSTACK_TYPE], **args) - - if CIDR not in subnet: - err = '"cidr" property must appear under the "subnet" property of a ' \ - 'subnet node' - ctx.logger.error('VALIDATION ERROR: ' + err) - raise NonRecoverableError(err) - validate_ip_or_range_syntax(ctx, subnet[CIDR]) diff --git a/neutron_plugin/tests/__init__.py b/neutron_plugin/tests/__init__.py deleted file mode 100644 index 04cb21f7..00000000 --- a/neutron_plugin/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -__author__ = 'idanmo' diff --git a/neutron_plugin/tests/resources/test_fip_rel.yaml 
b/neutron_plugin/tests/resources/test_fip_rel.yaml deleted file mode 100644 index 5bd41fa2..00000000 --- a/neutron_plugin/tests/resources/test_fip_rel.yaml +++ /dev/null @@ -1,27 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin.yaml - -dsl_definitions: - - &openstack_config - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa - -node_templates: - network: - type: cloudify.openstack.nodes.Network - properties: - resource_id: abcdef - openstack_config: *openstack_config - - fip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - relationships: - - target: network - type: cloudify.relationships.connected_to diff --git a/neutron_plugin/tests/resources/test_fip_rel_and_id.yaml b/neutron_plugin/tests/resources/test_fip_rel_and_id.yaml deleted file mode 100644 index f6612036..00000000 --- a/neutron_plugin/tests/resources/test_fip_rel_and_id.yaml +++ /dev/null @@ -1,29 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - http://www.getcloudify.org/spec/cloudify/4.2/types.yaml - - plugin.yaml - -dsl_definitions: - - &openstack_config - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa - -node_templates: - network: - type: cloudify.openstack.nodes.Network - properties: - resource_id: abcdef - openstack_config: *openstack_config - - fip: - type: cloudify.openstack.nodes.FloatingIP - properties: - openstack_config: *openstack_config - floatingip: - floating_network_id: 'bogus-id' - relationships: - - target: network - type: cloudify.relationships.connected_to diff --git a/neutron_plugin/tests/test.py b/neutron_plugin/tests/test.py deleted file mode 100644 index 4d83f197..00000000 --- a/neutron_plugin/tests/test.py +++ /dev/null @@ -1,273 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -import mock -import random -import string -import unittest - -from cloudify.context import BootstrapContext -from cloudify.state import current_ctx - -from cloudify.mocks import MockCloudifyContext - -import openstack_plugin_common -import openstack_plugin_common.tests.test as common_test - -import neutron_plugin -import neutron_plugin.network -import neutron_plugin.port -import neutron_plugin.router -import neutron_plugin.security_group - - -@mock.patch('openstack_plugin_common.NeutronClientWithSugar') -class ResourcesRenamingTest(unittest.TestCase): - - def setUp(self): - config_get = mock.patch( - 'openstack_plugin_common.Config.get', - mock.Mock(return_value={}), - ) - config_get.start() - self.addCleanup(config_get.stop) - - def _setup_ctx(self, obj_type): - self.ctx = common_test.create_mock_ctx_with_provider_info( - node_id='__cloudify_id_something_001', - properties={ - 'resource_id': 'resource_id', - 'description': 'description', - 'external_network': 'external_network', - obj_type: { - 'name': obj_type + '_name', - }, - 'rules': [] # For security_group - } - ) - current_ctx.set(self.ctx) - self.addCleanup(current_ctx.clear) - return self.ctx - - def _test(self, obj_type, neutron_mock, additional={}): - neutron_mock.cosmo_list = mock.Mock() - neutron_mock.cosmo_list.return_value = [] - attr = getattr(neutron_mock, 'create_' + obj_type) - - obj_return = { - 'id': obj_type + '_id', - 'name': obj_type + '_name' - } - obj_return.update(additional) - - attr.return_value = { - obj_type: obj_return - } - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=self.ctx): - getattr(neutron_plugin, obj_type).create( - neutron_client=neutron_mock, - ctx=self.ctx, args={}) - - calls = attr.mock_calls - self.assertEquals(len(calls), 1) # Exactly one object created - # Indexes into call[]: - # 0 - the only call - # 1 - regular arguments - # 0 - first argument - arg = calls[0][1][0] - self.assertEquals(arg[obj_type]['name'], 'p2_' + obj_type + '_name') - - def test_network(self, neutron_mock): - self._setup_ctx('network') - self._test('network', neutron_mock) - - def test_port(self, neutron_mock): - self._setup_ctx('port') - self.ctx.node.properties['fixed_ip'] = "1.2.3.4" - fake_instance = mock.Mock() - fake_instance.target.instance.runtime_properties = { - openstack_plugin_common.OPENSTACK_TYPE_PROPERTY: 'network', - openstack_plugin_common.OPENSTACK_ID_PROPERTY: 'network_id' - } - self.ctx._instance._relationships = [ - fake_instance - ] - self._test('port', neutron_mock, {'fixed_ips': None, - 'mac_address': 'mac_address'}) - - def test_router(self, neutron_mock): - self._setup_ctx('router') - self._test('router', neutron_mock) - - def test_security_group(self, neutron_mock): - self._setup_ctx('security_group') - self._test('security_group', neutron_mock, ) - - # Network chosen arbitrary for this test. - # Just testing something without prefix. 
- def test_network_no_prefix(self, neutron_mock): - ctx = self._setup_ctx('network') - for pctx in common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX: - ctx._bootstrap_context = BootstrapContext(pctx) - neutron_mock.create_network.reset_mock() - neutron_mock.create_network.return_value = { - 'network': { - 'id': 'network_id', - 'name': 'network_name', - } - } - - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=self.ctx): - neutron_plugin.network.create(neutron_client=neutron_mock, - ctx=self.ctx, args={}) - - neutron_mock.create_network.assert_called_once_with({ - 'network': {'name': 'network_name', 'admin_state_up': True} - }) - - -def _rand_str(n): - chars = string.ascii_uppercase + string.digits - return ''.join(random.choice(chars) for _ in range(n)) - - -@mock.patch('openstack_plugin_common.NeutronClientWithSugar') -class SecurityGroupTest(unittest.TestCase): - - def setUp(self): - # *** Configs from files ******************** - config_get = mock.patch( - 'openstack_plugin_common.Config.get', - mock.Mock(return_value={}), - ) - config_get.start() - self.addCleanup(config_get.stop) - - # context - sg_name = _rand_str(6) + '_new' - self.ctx = MockCloudifyContext( - node_id='test', - deployment_id='test', - properties={ - 'description': 'The best Security Group. Great', - 'resource_id': 'mock_sg', - 'security_group': { - 'name': sg_name, - 'description': 'blah' - }, - 'rules': [{'port': 80}], - 'disable_default_egress_rules': True, - } - ) - current_ctx.set(self.ctx) - self.addCleanup(current_ctx.clear) - - def test_sg_new(self, neutron_mock): - neutron_plugin.security_group._rules_for_sg_id = mock.Mock() - neutron_plugin.security_group._rules_for_sg_id.return_value = [] - - neutron_mock.cosmo_list = mock.Mock() - neutron_mock.cosmo_list.return_value = [] - neutron_mock.create_security_group = mock.Mock() - neutron_mock.create_security_group.return_value = { - 'security_group': { - 'description': 'blah', - 'id': self.ctx._properties['security_group']['name'] + '_id', - } - } - - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=self.ctx): - neutron_plugin.security_group.create(neutron_client=neutron_mock, - ctx=self.ctx, args={}) - - neutron_mock.create_security_group.assert_called_once_with({ - 'security_group': { - 'description': 'blah', - 'name': self.ctx._properties['security_group']['name'] - } - }) - - def test_sg_use_existing(self, neutron_mock): - neutron_plugin.security_group._rules_for_sg_id = mock.Mock() - neutron_plugin.security_group._rules_for_sg_id.return_value = [] - - neutron_mock.cosmo_list = mock.Mock() - neutron_mock.cosmo_list.return_value = [{ - 'id': self.ctx._properties['security_group']['name'] + '_ex_id', - 'description': 'blah', - 'security_group_rules': [{ - 'remote_group_id': None, - 'direction': 'ingress', - 'protocol': 'tcp', - 'ethertype': 'IPv4', - 'port_range_max': 80, - 'port_range_min': 80, - 'remote_ip_prefix': '0.0.0.0/0', - }] - }] - neutron_mock.create_security_group = mock.Mock() - neutron_mock.create_security_group.return_value = { - 'security_group': { - 'description': 'blah', - 'id': self.ctx._properties['security_group']['name'] + '_id', - } - } - - with mock.patch('openstack_plugin_common._find_context_in_kw', - return_value=self.ctx): - neutron_plugin.security_group.create(neutron_client=neutron_mock, - ctx=self.ctx, args={}) - - neutron_mock.create_security_group.assert_called_once_with({ - 'security_group': { - 'description': 'blah', - 'name': 
self.ctx._properties['security_group']['name'] - } - }) - - def test_sg_use_existing_with_other_rules(self, neutron_mock): - neutron_plugin.security_group._rules_for_sg_id = mock.Mock() - neutron_plugin.security_group._rules_for_sg_id.return_value = [] - - neutron_mock.cosmo_list = mock.Mock() - neutron_mock.cosmo_list.return_value = [{ - 'id': self.ctx._properties['security_group']['name'] + '_ex_id', - 'description': 'blah', - 'security_group_rules': [{ - 'remote_group_id': None, - 'direction': 'ingress', - 'protocol': 'tcp', - 'ethertype': 'IPv4', - 'port_range_max': 81, # Note the different port! - 'port_range_min': 81, # Note the different port! - 'remote_ip_prefix': '0.0.0.0/0', - }] - }] - neutron_mock.create_security_group = mock.Mock() - neutron_mock.create_security_group.return_value = { - 'security_group': { - 'description': 'blah', - 'id': self.ctx._properties['security_group']['name'] + '_id', - } - } - neutron_plugin.security_group.create(neutron_client=neutron_mock, - ctx=self.ctx, args={}) - - -if __name__ == '__main__': - unittest.main() diff --git a/neutron_plugin/tests/test_floating_ip.py b/neutron_plugin/tests/test_floating_ip.py deleted file mode 100644 index fd48ac5d..00000000 --- a/neutron_plugin/tests/test_floating_ip.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import unittest -import mock - -from cloudify.test_utils import workflow_test -from cloudify.mocks import MockNodeInstanceContext - -from neutron_plugin.floatingip import ( - FLOATINGIP_OPENSTACK_TYPE, FLOATING_NETWORK_ERROR_PREFIX) -from openstack_plugin_common import OPENSTACK_ID_PROPERTY - - -class FloatingIPTest(unittest.TestCase): - @mock.patch('neutron_plugin.network.create') - @mock.patch('neutronclient.v2_0.client.Client.create_floatingip') - @workflow_test(os.path.join('resources', 'test_fip_rel.yaml'), - copy_plugin_yaml=True) - def test_network_rel(self, cfy_local, *_): - def _mock_rel(*_): - return MockNodeInstanceContext(runtime_properties={ - OPENSTACK_ID_PROPERTY: 'my-id' - }) - - def _mock_create(_, fip): - self.assertEqual(fip[FLOATINGIP_OPENSTACK_TYPE][ - 'floating_network_id'], 'my-id') - return {FLOATINGIP_OPENSTACK_TYPE: { - 'id': '1234', - 'floating_ip_address': '1.2.3.4' - }} - - with mock.patch('neutronclient.v2_0.client.Client.create_floatingip', - new=_mock_create): - with mock.patch( - 'neutron_plugin.floatingip.get_single_connected_node_by_' - 'openstack_type', new=_mock_rel): - cfy_local.execute('install') - - @mock.patch('neutron_plugin.network.create') - @mock.patch('neutronclient.v2_0.client.Client.create_floatingip') - @workflow_test(os.path.join('resources', 'test_fip_rel_and_id.yaml'), - copy_plugin_yaml=True) - def test_network_rel_and_id(self, cfy_local, *_): - def _mock_rel(*_): - return MockNodeInstanceContext(runtime_properties={ - OPENSTACK_ID_PROPERTY: 'my-id' - }) - - with mock.patch('neutron_plugin.floatingip.get_single_connected_node_' - 'by_openstack_type', - new=_mock_rel): - with self.assertRaises(Exception) as ex: - cfy_local.execute('install') - - self.assertTrue(FLOATING_NETWORK_ERROR_PREFIX in str(ex.exception)) diff --git a/neutron_plugin/tests/test_port.py b/neutron_plugin/tests/test_port.py deleted file mode 100644 index 00b64993..00000000 --- a/neutron_plugin/tests/test_port.py +++ /dev/null @@ -1,315 +0,0 @@ -######## -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import unittest -import mock - -import neutron_plugin.port -from neutron_plugin.security_group import SG_OPENSTACK_TYPE -from cloudify.mocks import (MockCloudifyContext, - MockNodeInstanceContext, - MockRelationshipSubjectContext) -from openstack_plugin_common import (NeutronClientWithSugar, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY) -from cloudify.exceptions import OperationRetry, NonRecoverableError -from cloudify.state import current_ctx - - -class TestPort(unittest.TestCase): - - def tearDown(self): - current_ctx.clear() - super(TestPort, self).tearDown() - - def test_port_delete(self): - node_props = { - 'fixed_ip': '', - 'port': { - 'allowed_address_pairs': [{ - 'ip_address': '1.2.3.4' - }]}} - mock_neutron = MockNeutronClient(update=True) - _ctx = self._get_mock_ctx_with_node_properties(node_props) - current_ctx.set(_ctx) - with mock.patch('neutron_plugin.port.ctx', _ctx): - # remove new ip - port = {'fixed_ips': [], - 'allowed_address_pairs': [{'ip_address': '1.2.3.4'}, - {'ip_address': '5.6.7.8'}], - 'mac_address': 'abc-edf'} - neutron_plugin.port._port_delete(mock_neutron, "port_id", port) - self.assertEqual( - {'port': {'allowed_address_pairs': [{ - 'ip_address': '5.6.7.8'}]}}, - mock_neutron.body) - - @mock.patch('openstack_plugin_common._handle_kw') - def test_delete(self, *_): - node_props = { - 'fixed_ip': '', - 'port': { - 'allowed_address_pairs': [{ - 'ip_address': '1.2.3.4' - }]}} - mock_neutron = MockNeutronClient(update=True) - _ctx = self._get_mock_ctx_with_node_properties(node_props) - _ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = 'test-sg-id' - _ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = 'port' - current_ctx.set(_ctx) - with mock.patch('neutron_plugin.port.ctx', _ctx): - port = {'fixed_ips': [], - 'allowed_address_pairs': [{'ip_address': '1.2.3.4'}, - {'ip_address': '5.6.7.8'}], - 'mac_address': 'abc-edf'} - with mock.patch( - 'neutron_plugin.port.use_external_resource', - mock.Mock(return_value=port) - ): - neutron_plugin.port.delete(mock_neutron) - self.assertEqual( - {'port': {'allowed_address_pairs': [{ - 'ip_address': '5.6.7.8'}]}}, - mock_neutron.body) - - def test_port_update(self): - node_props = { - 'fixed_ip': '', - 'resource_id': 'resource_id', - 'port': { - 'allowed_address_pairs': [{ - 'ip_address': '1.2.3.4' - }]}} - mock_neutron = MockNeutronClient(update=True) - _ctx = self._get_mock_ctx_with_node_properties(node_props) - current_ctx.set(_ctx) - with mock.patch('neutron_plugin.port.ctx', _ctx): - port = {'fixed_ips': [], - 'mac_address': 'abc-edf'} - # add new ip - neutron_plugin.port._port_update(mock_neutron, "port_id", {}, port) - self.assertEqual( - { - 'fixed_ip_address': None, - 'allowed_address_pairs': [{'ip_address': '1.2.3.4'}], - 'mac_address': 'abc-edf' - }, - _ctx.instance.runtime_properties) - # readd same ip - port = {'fixed_ips': [], - 'allowed_address_pairs': [{'ip_address': '1.2.3.4'}], - 'mac_address': 'abc-edf'} - with self.assertRaises(NonRecoverableError): - neutron_plugin.port._port_update(mock_neutron, "port_id", - {}, port) - - 
@mock.patch('openstack_plugin_common._handle_kw') - def test_create(self, *_): - node_props = { - 'fixed_ip': '', - 'resource_id': 'resource_id', - 'port': { - 'allowed_address_pairs': [{ - 'ip_address': '1.2.3.4' - }]}} - mock_neutron = MockNeutronClient(update=True) - _ctx = self._get_mock_ctx_with_node_properties(node_props) - _ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = 'test-sg-id' - _ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = 'port' - current_ctx.set(_ctx) - with mock.patch('neutron_plugin.port.ctx', _ctx): - port = {'fixed_ips': [], - 'allowed_address_pairs': [{'ip_address': '5.6.7.8'}], - 'mac_address': 'abc-edf'} - with mock.patch( - 'neutron_plugin.port.use_external_resource', - mock.Mock(return_value=port) - ): - neutron_plugin.port.create(mock_neutron, {}) - self.assertEqual( - {'port': {'allowed_address_pairs': [{ - 'ip_address': '5.6.7.8' - }, { - 'ip_address': '1.2.3.4' - }]}}, - mock_neutron.body) - - def test_fixed_ips_no_fixed_ips(self): - node_props = {'fixed_ip': ''} - mock_neutron = MockNeutronClient(update=True) - - with mock.patch( - 'neutron_plugin.port.' - 'get_openstack_id_of_single_connected_node_by_openstack_type', - self._get_connected_subnets_mock(return_empty=True)): - with mock.patch( - 'neutron_plugin.port.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - - port = {} - neutron_plugin.port._handle_fixed_ips(port, mock_neutron) - - self.assertNotIn('fixed_ips', port) - - def test_fixed_ips_subnet_only(self): - node_props = {'fixed_ip': ''} - mock_neutron = MockNeutronClient(update=True) - - with mock.patch( - 'neutron_plugin.port.' - 'get_openstack_ids_of_connected_nodes_by_openstack_type', - self._get_connected_subnets_mock(return_empty=False)): - with mock.patch( - 'neutron_plugin.port.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - - port = {} - neutron_plugin.port._handle_fixed_ips(port, mock_neutron) - - self.assertEquals([{'subnet_id': 'some-subnet-id'}], - port.get('fixed_ips')) - - def test_fixed_ips_ip_address_only(self): - node_props = {'fixed_ip': '1.2.3.4'} - mock_neutron = MockNeutronClient(update=True) - - with mock.patch( - 'neutron_plugin.port.' - 'get_openstack_id_of_single_connected_node_by_openstack_type', - self._get_connected_subnets_mock(return_empty=True)): - with mock.patch( - 'neutron_plugin.port.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - - port = {} - neutron_plugin.port._handle_fixed_ips(port, mock_neutron) - - self.assertEquals([{'ip_address': '1.2.3.4'}], - port.get('fixed_ips')) - - def test_fixed_ips_subnet_and_ip_address(self): - node_props = {'fixed_ip': '1.2.3.4'} - mock_neutron = MockNeutronClient(update=True) - - with mock.patch( - 'neutron_plugin.port.' 
- 'get_openstack_ids_of_connected_nodes_by_openstack_type', - self._get_connected_subnets_mock(return_empty=False)): - with mock.patch( - 'neutron_plugin.port.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - - port = {} - neutron_plugin.port._handle_fixed_ips(port, mock_neutron) - - self.assertEquals([{'ip_address': '1.2.3.4', - 'subnet_id': 'some-subnet-id'}], - port.get('fixed_ips')) - - @staticmethod - def _get_connected_subnets_mock(return_empty=True): - return lambda *args, **kw: None if return_empty else ['some-subnet-id'] - - @staticmethod - def _get_mock_ctx_with_node_properties(properties): - return MockCloudifyContext(node_id='test_node_id', - properties=properties) - - -class MockNeutronClient(NeutronClientWithSugar): - """A fake neutron client with hard-coded test data.""" - def __init__(self, update): - self.update = update - self.body = {'port': {'id': 'test-id', 'security_groups': []}} - - def show_port(self, *_): - return self.body - - def delete_port(self, *_): - pass - - def update_port(self, _, b, **__): - if self.update: - self.body.update(b) - return - - def cosmo_get(self, *_, **__): - return self.body['port'] - - def show_subnet(self, subnet_id=None): - subnet = { - 'subnet': { - 'id': subnet_id, - } - } - if subnet_id == 'some-subnet-id': - subnet['subnet']['cidr'] = '1.2.3.0/24' - else: - subnet['subnet']['cidr'] = '2.3.4.0/24' - return subnet - - -class TestPortSG(unittest.TestCase): - @mock.patch('openstack_plugin_common._handle_kw') - def test_connect_sg_to_port(self, *_): - mock_neutron = MockNeutronClient(update=True) - ctx = MockCloudifyContext( - source=MockRelationshipSubjectContext(node=mock.MagicMock(), - instance=mock.MagicMock()), - target=MockRelationshipSubjectContext( - node=mock.MagicMock(), - instance=MockNodeInstanceContext( - runtime_properties={ - OPENSTACK_ID_PROPERTY: 'test-sg-id', - OPENSTACK_TYPE_PROPERTY: SG_OPENSTACK_TYPE}))) - - with mock.patch('neutron_plugin.port.ctx', ctx): - neutron_plugin.port.connect_security_group(mock_neutron) - self.assertIsNone(ctx.operation._operation_retry) - - @mock.patch('openstack_plugin_common._handle_kw') - def test_connect_sg_to_port_race_condition(self, *_): - mock_neutron = MockNeutronClient(update=False) - - ctx = MockCloudifyContext( - source=MockRelationshipSubjectContext(node=mock.MagicMock(), - instance=mock.MagicMock()), - target=MockRelationshipSubjectContext( - node=mock.MagicMock(), - instance=MockNodeInstanceContext( - runtime_properties={ - OPENSTACK_ID_PROPERTY: 'test-sg-id', - OPENSTACK_TYPE_PROPERTY: SG_OPENSTACK_TYPE}))) - with mock.patch('neutron_plugin.port.ctx', ctx): - neutron_plugin.port.connect_security_group(mock_neutron, ctx=ctx) - self.assertIsInstance(ctx.operation._operation_retry, - OperationRetry) - - @mock.patch('openstack_plugin_common._handle_kw') - def test_disconnect_sg_to_port(self, *_): - mock_neutron = MockNeutronClient(update=True) - ctx = MockCloudifyContext( - source=MockRelationshipSubjectContext(node=mock.MagicMock(), - instance=mock.MagicMock()), - target=MockRelationshipSubjectContext( - node=mock.MagicMock(), - instance=MockNodeInstanceContext( - runtime_properties={ - OPENSTACK_ID_PROPERTY: 'test-sg-id', - OPENSTACK_TYPE_PROPERTY: SG_OPENSTACK_TYPE}))) - - with mock.patch('neutron_plugin.port.ctx', ctx): - neutron_plugin.port.disconnect_security_group(mock_neutron) - self.assertIsNone(ctx.operation._operation_retry) diff --git a/neutron_plugin/tests/test_rbac_policy.py b/neutron_plugin/tests/test_rbac_policy.py deleted file mode 100644 index 
3a9687bc..00000000 --- a/neutron_plugin/tests/test_rbac_policy.py +++ /dev/null @@ -1,530 +0,0 @@ -import mock -import unittest - -from cloudify.context import NODE_INSTANCE -from cloudify.exceptions import NonRecoverableError -from cloudify.mocks import ( - MockContext, - MockNodeInstanceContext, - MockNodeContext, - MockRelationshipContext, - MockRelationshipSubjectContext -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_TYPE_PROPERTY -) -from neutron_plugin.rbac_policy import ( - RBAC_POLICY_OPENSTACK_TYPE, - RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE -) - -import neutron_plugin - - -class TestRBACPolicy(unittest.TestCase): - test_node_instance_id = 'test-rbac-policy-instance-id' - test_tenant_id = '11111111111111111111' - test_os_rbac_policy_id = '222222222222222' - test_os_network_id = '333333333333333' - test_deployment_id = 'test-deployment-id' - - class MockRBACPolicyOS: - def __init__(self, - id, - action, - object_id, - object_type='network', - target_tenant='*'): - self._id = id - self._action = action - self._object_id = object_id - self._object_type = object_type - self._target_tenant = target_tenant - - @property - def id(self): - return self._id - - @property - def action(self): - return self._action - - @property - def object_id(self): - return self._object_id - - @property - def object_type(self): - return self._object_type - - @property - def target_tenant(self): - return self._target_tenant - - def to_dict(self): - return dict( - [(k.strip('_'), v) for k, v in vars(self).iteritems()] - ) - - def mock_neutron_client(self, mock_rbac_policy): - neutron_client = mock.MagicMock() - - neutron_client.cosmo_get_if_exists.return_value = mock_rbac_policy - neutron_client.get_name_from_resource.return_value = None - neutron_client.get_id_from_resource.return_value = \ - self.test_os_rbac_policy_id - neutron_client.cosmo_delete_resource = mock.MagicMock() - neutron_client.create_rbac_policy.return_value = { - 'rbac_policy': mock_rbac_policy.to_dict() - } - neutron_client.show_rbac_policy.return_value = mock_rbac_policy - neutron_client.list_rbac_policies.return_value = { - 'rbac_policies': [mock_rbac_policy.to_dict()] - } - - return neutron_client - - def mock_ctx(self, - test_properties, - test_node_instance_id, - test_deployment_id, - runtime_properties=None, - test_relationships=None): - - ctx = MockContext() - ctx.node = MockNodeContext(properties=test_properties) - ctx.instance = MockNodeInstanceContext( - id=test_node_instance_id, - runtime_properties=runtime_properties or {}, - relationships=test_relationships or [] - ) - ctx.deployment = mock.Mock() - ctx.deployment.id = test_deployment_id - ctx.bootstrap_context = mock.Mock() - setattr(ctx.bootstrap_context, 'resources_prefix', '') - ctx.type = NODE_INSTANCE - ctx.logger = mock.Mock() - - return ctx - - def mock_properties(self, - use_external_resource=False, - create_if_missing=False, - resource_id='', - include_reference=True): - rbac_properties = { - 'target_tenant': self.test_tenant_id, - 'action': 'access_as_shared' - } - - if include_reference: - rbac_properties['object_type'] = 'network' - rbac_properties['object_id'] = self.test_os_network_id - - properties = { - 'resource_id': resource_id, - 'use_external_resource': use_external_resource, - 'create_if_missing': create_if_missing, - RBAC_POLICY_OPENSTACK_TYPE: rbac_properties - } - - return properties - - def mock_relationship(self, - type=RBAC_POLICY_APPLIED_FOR_RELATIONSHIP_TYPE, - runtime_properties=None): - 
- class _MockRelationshipContext(MockRelationshipContext): - - @property - def type_hierarchy(self): - return [self.type] - - return _MockRelationshipContext( - MockRelationshipSubjectContext( - node=None, - instance=MockNodeInstanceContext( - runtime_properties=runtime_properties or {}, - ) - ), - type=type - ) - - def mock_all(self, relationships=None, **kwargs): - ctx = self.mock_ctx( - self.mock_properties(**kwargs), - self.test_node_instance_id, - self.test_deployment_id, - { - OPENSTACK_ID_PROPERTY: self.test_node_instance_id, - OPENSTACK_NAME_PROPERTY: None, - OPENSTACK_TYPE_PROPERTY: RBAC_POLICY_OPENSTACK_TYPE, - }, - relationships or [] - ) - neutron_plugin.rbac_policy.ctx = ctx - mocked_rbac_policy = self.MockRBACPolicyOS( - id=self.test_os_rbac_policy_id, - object_id=self.test_os_network_id, - action='access_as_shared', - target_tenant=self.test_tenant_id - ) - neutron_client = self.mock_neutron_client(mocked_rbac_policy) - - return ctx, neutron_client, mocked_rbac_policy - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_create_and_delete(self, *_): - # given - ctx, neutron_client, _ = self.mock_all() - - # when (create) - neutron_plugin.rbac_policy.create(neutron_client, {}) - - # then (create) - neutron_client.create_rbac_policy.assert_called_once() - - self.assertEqual( - None, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_os_rbac_policy_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - RBAC_POLICY_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - - # when (delete) - neutron_plugin.rbac_policy.delete(neutron_client) - - # then (delete) - neutron_client.cosmo_delete_resource.assert_called_once_with( - RBAC_POLICY_OPENSTACK_TYPE, - self.test_os_rbac_policy_id - ) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_create_and_delete_external_resource(self, *_): - # given - ctx, neutron_client, _ = self.mock_all(use_external_resource=True) - - # when (create) - neutron_plugin.rbac_policy.create(neutron_client, {}) - - # then (create) - neutron_client.create_rbac_policy.assert_not_called() - - self.assertEqual( - None, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_os_rbac_policy_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - RBAC_POLICY_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - - # when (delete) - neutron_plugin.rbac_policy.delete(neutron_client) - - # then (delete) - neutron_client.cosmo_delete_resource.assert_not_called() - - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_create_and_delete_using_relationship(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - include_reference=False, - relationships=[ - self.mock_relationship( - runtime_properties={ - 
OPENSTACK_TYPE_PROPERTY: 'network', - OPENSTACK_ID_PROPERTY: self.test_os_network_id - } - ), - self.mock_relationship( - type='cloudify.relationships.depends_on' - ) - ] - ) - - # when (create) - neutron_plugin.rbac_policy.create(neutron_client, {}) - - # then (create) - neutron_client.create_rbac_policy.assert_called_once() - - self.assertEqual( - None, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_os_rbac_policy_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - RBAC_POLICY_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - - # when (delete) - neutron_plugin.rbac_policy.delete(neutron_client) - - # then (delete) - neutron_client.cosmo_delete_resource.assert_called_once_with( - RBAC_POLICY_OPENSTACK_TYPE, - self.test_os_rbac_policy_id - ) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_fail_create_using_multiple_relationships(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - include_reference=False, - relationships=[ - self.mock_relationship( - runtime_properties={ - OPENSTACK_TYPE_PROPERTY: 'network', - OPENSTACK_ID_PROPERTY: self.test_os_network_id - } - ), - self.mock_relationship() - ] - ) - - # when + then - with self.assertRaises(NonRecoverableError): - neutron_plugin.rbac_policy.create(neutron_client, {}) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_fail_create_using_relationship_with_missing_data(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - include_reference=False, - relationships=[self.mock_relationship()] - ) - - # when - neutron_plugin.rbac_policy.create(neutron_client, {}) - - # then - neutron_client.create_rbac_policy.assert_called_once_with({ - 'rbac_policy': { - 'target_tenant': self.test_tenant_id, - 'action': 'access_as_shared' - } - }) - # should cause "Bad Request" openstack API error bacause of lack - # "object_id" and "object_type" fields - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_fail_create_using_relationship_and_properties(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - relationships=[ - self.mock_relationship( - runtime_properties={ - OPENSTACK_TYPE_PROPERTY: 'network', - OPENSTACK_ID_PROPERTY: self.test_os_network_id - } - ) - ] - ) - - # when + then - with self.assertRaises(NonRecoverableError): - neutron_plugin.rbac_policy.create(neutron_client, {}) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_create_and_delete_using_args(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - include_reference=False, - ) - - # when (create) - neutron_plugin.rbac_policy.create( - neutron_client, - self.mock_properties() - ) - - # then (create) - neutron_client.create_rbac_policy.assert_called_once() - - self.assertEqual( - None, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_os_rbac_policy_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - RBAC_POLICY_OPENSTACK_TYPE, - 
ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - - # when (delete) - neutron_plugin.rbac_policy.delete(neutron_client) - - # then (delete) - neutron_client.cosmo_delete_resource.assert_called_once_with( - RBAC_POLICY_OPENSTACK_TYPE, - self.test_os_rbac_policy_id - ) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_fail_create_using_relationship_and_args(self, *_): - # given - ctx, neutron_client, _ = self.mock_all( - include_reference=False, - relationships=[ - self.mock_relationship( - runtime_properties={ - OPENSTACK_TYPE_PROPERTY: 'network', - OPENSTACK_ID_PROPERTY: self.test_os_network_id - } - ) - ] - ) - - # when + then - with self.assertRaises(NonRecoverableError): - neutron_plugin.rbac_policy.create( - neutron_client, - self.mock_properties() - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_list(self, *_): - # given - ctx, neutron_client, _ = self.mock_all() - - # when - neutron_plugin.rbac_policy.list_rbac_policies(neutron_client, {}) - - # then - rbac_policies_list_key = '{}_list'.format(RBAC_POLICY_OPENSTACK_TYPE) - self.assertIn(rbac_policies_list_key, ctx.instance.runtime_properties) - self.assertEqual( - [{ - 'target_tenant': self.test_tenant_id, - 'action': 'access_as_shared', - 'object_type': 'network', - 'object_id': self.test_os_network_id, - 'id': self.test_os_rbac_policy_id - }], - ctx.instance.runtime_properties[rbac_policies_list_key] - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_find_and_delete(self, *_): - # given - ctx, neutron_client, _ = self.mock_all() - - # when - neutron_plugin.rbac_policy.find_and_delete(neutron_client, {}) - - # then - neutron_client.list_rbac_policies.assert_called_once() - neutron_client.cosmo_delete_resource.assert_called_once_with( - RBAC_POLICY_OPENSTACK_TYPE, - self.test_os_rbac_policy_id - ) diff --git a/neutron_plugin/tests/test_router.py b/neutron_plugin/tests/test_router.py deleted file mode 100644 index 5c24c980..00000000 --- a/neutron_plugin/tests/test_router.py +++ /dev/null @@ -1,84 +0,0 @@ -######## -# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
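
The RBAC policy tests above rely on a `MagicMock` standing in for the neutron client, with canned return values and assertions only on how the client was called. A minimal, self-contained sketch of that pattern follows; `make_policy_payload` and the test class are hypothetical names used purely for illustration, and `unittest.mock` is assumed in place of the older external `mock` package.

```python
import unittest
from unittest import mock


def make_policy_payload(network_id, tenant='*'):
    # Hypothetical helper: the request body an RBAC-policy create would send.
    return {
        'rbac_policy': {
            'object_type': 'network',
            'object_id': network_id,
            'action': 'access_as_shared',
            'target_tenant': tenant,
        }
    }


class RBACPolicyCallTest(unittest.TestCase):

    def test_create_sends_expected_body(self):
        neutron_client = mock.MagicMock()
        neutron_client.create_rbac_policy.return_value = {
            'rbac_policy': {'id': 'policy-id'}
        }

        payload = make_policy_payload('net-id')
        result = neutron_client.create_rbac_policy(payload)

        # Assert on the exact request body, which is what the deleted tests
        # verify for the relationship-driven code paths.
        neutron_client.create_rbac_policy.assert_called_once_with(payload)
        self.assertEqual('policy-id', result['rbac_policy']['id'])


if __name__ == '__main__':
    unittest.main()
```
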
- -import unittest - -from mock import patch, Mock - -import neutron_plugin.router -from cloudify.mocks import MockCloudifyContext -from cloudify.state import current_ctx - -from openstack_plugin_common import (NeutronClientWithSugar, - OPENSTACK_ID_PROPERTY) -from cloudify.exceptions import NonRecoverableError - - -@patch('openstack_plugin_common.NeutronClientWithSugar') -class TestRouter(unittest.TestCase): - - def update_router(self, mock_nc): - node_routes_props = { - 'routes': [ - { - 'nexthop': '192.168.120.123', - 'destination': '192.168.121.0/24', - } - ] - } - ctx = self._get_mock_ctx_with_node_properties(node_routes_props) - current_ctx.set(ctx=ctx) - mock_nc.show_router = Mock(return_value={ - 'router': { - 'id': 'id', - 'routes': [ - { - 'nexthop': '192.168.120.123', - 'destination': '192.168.121.0/24', - } - ] - } - }) - - neutron_plugin.router.add_routes(neutron_client=mock_nc) - - def test_update_router_wrong_type(self, mock_nc): - node_props = {} - ctx = self._get_mock_ctx_with_node_properties(node_props) - current_ctx.set(ctx=ctx) - with self.assertRaises(NonRecoverableError): - neutron_plugin.router.add_routes(neutron_client=mock_nc) - - @staticmethod - def _get_mock_ctx_with_node_properties(properties): - return MockCloudifyContext( - node_id='test_node_id', - properties=properties, - runtime_properties={OPENSTACK_ID_PROPERTY: 'id'}) - - -class MockNeutronClient(NeutronClientWithSugar): - """A fake neutron client with hard-coded test data.""" - def __init__(self, update): - self.update = update - self.body = {'router_id': '', 'router': {'routes': []}} - - def show_router(self, *_): - return self.body - - def update_router(self, _, b, **__): - if self.update: - self.body.update(b) - return diff --git a/neutron_plugin/tests/test_security_group.py b/neutron_plugin/tests/test_security_group.py deleted file mode 100644 index 25a0b573..00000000 --- a/neutron_plugin/tests/test_security_group.py +++ /dev/null @@ -1,114 +0,0 @@ -# -*- coding: utf-8 -*- -######### -# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import unittest - -from mock import Mock, patch -from requests.exceptions import RequestException - -from neutron_plugin import security_group - -from cloudify.exceptions import NonRecoverableError -from cloudify.state import current_ctx - -from cloudify.mocks import MockCloudifyContext - - -class FakeException(Exception): - pass - - -@patch('openstack_plugin_common.OpenStackClient._validate_auth_params') -@patch('openstack_plugin_common.NeutronClientWithSugar') -class TestSecurityGroup(unittest.TestCase): - - def setUp(self): - super(TestSecurityGroup, self).setUp() - self.nova_client = Mock() - - self.ctx = MockCloudifyContext( - node_id='test', - deployment_id='test', - properties={ - 'description': 'The best Security Group. 
Great', - 'rules': [], - 'resource_id': 'mock_sg', - 'security_group': {}, - 'server': {}, - 'openstack_config': { - 'auth_url': 'things/v3', - }, - }, - operation={'retry_number': 0}, - provider_context={'resources': {}} - ) - current_ctx.set(self.ctx) - self.addCleanup(current_ctx.clear) - - findctx = patch( - 'openstack_plugin_common._find_context_in_kw', - return_value=self.ctx, - ) - findctx.start() - self.addCleanup(findctx.stop) - - def test_set_sg_runtime_properties(self, mock_nc, *_): - security_group.create( - nova_client=self.nova_client, - ctx=self.ctx, - args={}, - ) - - self.assertEqual( - { - 'external_type': 'security_group', - 'external_id': mock_nc().get_id_from_resource(), - 'external_name': mock_nc().get_name_from_resource(), - }, - self.ctx.instance.runtime_properties - ) - - def test_create_sg_wait_timeout(self, mock_nc, *_): - mock_nc().show_security_group.side_effect = RequestException - - with self.assertRaises(NonRecoverableError): - security_group.create( - nova_client=self.nova_client, - ctx=self.ctx, - args={}, - status_attempts=3, - status_timeout=0.001, - ) - - @patch( - 'neutron_plugin.security_group.delete_resource_and_runtime_properties') - def test_dont_duplicate_if_failed_rule(self, mock_del_res, mock_nc, *_): - self.ctx.node.properties['rules'] = [ - { - 'port': '🍷', - }, - ] - mock_nc().create_security_group_rule.side_effect = FakeException - mock_del_res.side_effect = FakeException('the 2nd') - - with self.assertRaises(NonRecoverableError) as e: - security_group.create( - nova_client=self.nova_client, - ctx=self.ctx, - args={}, - ) - - self.assertIn('the 2nd', str(e.exception)) diff --git a/nova_plugin/__init__.py b/nova_plugin/__init__.py deleted file mode 100644 index bb533273..00000000 --- a/nova_plugin/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -__author__ = 'idanmo' diff --git a/nova_plugin/flavor.py b/nova_plugin/flavor.py deleted file mode 100644 index 1c08f803..00000000 --- a/nova_plugin/flavor.py +++ /dev/null @@ -1,97 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
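
The security group tests above inject failures by assigning `side_effect` on the mocked client (for example `RequestException` on every status poll) and then assert that the caller surfaces its own error once retries are exhausted. A small sketch of that failure-injection pattern, under the assumption that the `requests` library is available; `fetch_with_retries` is a hypothetical stand-in for the plugin's status-polling helper.

```python
import unittest
from unittest import mock

from requests.exceptions import RequestException


def fetch_with_retries(client, group_id, attempts=3):
    # Hypothetical helper: poll the client, give up after `attempts` tries.
    for _ in range(attempts):
        try:
            return client.show_security_group(group_id)
        except RequestException:
            continue
    raise RuntimeError(
        'security group {0} never became ready'.format(group_id))


class SecurityGroupTimeoutTest(unittest.TestCase):

    def test_poll_gives_up_after_retries(self):
        client = mock.MagicMock()
        client.show_security_group.side_effect = RequestException

        with self.assertRaises(RuntimeError):
            fetch_with_retries(client, 'sg-id', attempts=3)

        self.assertEqual(3, client.show_security_group.call_count)


if __name__ == '__main__':
    unittest.main()
```
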
- -from cloudify import ctx -from cloudify.decorators import operation - -from openstack_plugin_common import ( - with_nova_client, - use_external_resource, - delete_runtime_properties, - delete_resource_and_runtime_properties, - create_object_dict, - add_list_to_runtime_properties, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -FLAVOR_OPENSTACK_TYPE = 'flavor' - -EXTRA_SPECS_PROPERTY = 'extra_specs' - -TENANTS_PROPERTY = 'tenants' - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -def _set_extra_specs(ctx, flavor): - extra_specs = ctx.node.properties.get(EXTRA_SPECS_PROPERTY, {}) - - if extra_specs: - ctx.logger.info( - 'Setting extra specs: {0} for flavor: {1}' - .format(extra_specs, flavor.to_dict()) - ) - - flavor.set_keys(extra_specs) - - ctx.instance.runtime_properties[EXTRA_SPECS_PROPERTY] = extra_specs - - -def _set_tenants_access(ctx, nova_client, flavor): - tenants = ctx.node.properties.get(TENANTS_PROPERTY, []) - - for tenant in tenants: - ctx.logger.info( - 'Adding tenant access: {0} for flavor: {1}' - .format(tenant, flavor.to_dict()) - ) - nova_client.flavor_access.add_tenant_access(flavor, tenant) - - ctx.instance.runtime_properties[TENANTS_PROPERTY] = tenants - - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - if use_external_resource(ctx, nova_client, FLAVOR_OPENSTACK_TYPE): - return - - flavor_dict = create_object_dict(ctx, FLAVOR_OPENSTACK_TYPE, args, {}) - ctx.logger.info('Creating flavor: {0}'.format(flavor_dict)) - - flavor = nova_client.flavors.create(**flavor_dict) - set_openstack_runtime_properties(ctx, flavor, FLAVOR_OPENSTACK_TYPE) - - _set_extra_specs(ctx, flavor) - _set_tenants_access(ctx, nova_client, flavor) - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - delete_resource_and_runtime_properties( - ctx, - nova_client, - RUNTIME_PROPERTIES_KEYS - ) - - delete_runtime_properties(ctx, [EXTRA_SPECS_PROPERTY, TENANTS_PROPERTY]) - - -@with_nova_client -def list_flavors(nova_client, args, **kwargs): - flavor_list = nova_client.flavors.list(**args) - add_list_to_runtime_properties(ctx, FLAVOR_OPENSTACK_TYPE, flavor_list) diff --git a/nova_plugin/floatingip.py b/nova_plugin/floatingip.py deleted file mode 100644 index e770c540..00000000 --- a/nova_plugin/floatingip.py +++ /dev/null @@ -1,60 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from cloudify import ctx -from cloudify.decorators import operation -from openstack_plugin_common import with_nova_client -from openstack_plugin_common.floatingip import ( - use_external_floatingip, - set_floatingip_runtime_properties, - delete_floatingip, - floatingip_creation_validation -) - - -# random note regarding nova floating-ips: floating ips on nova-net have -# pre-assigned ids, and thus a call "nova.floating_ips.get()" will -# return a value even if the floating-ip isn't even allocated. 
-# currently all lookups in the code, including by id, use search (i.e. -# nova..findall) and lists, which won't return such unallocated -# resources. - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - - if use_external_floatingip(nova_client, 'ip', - lambda ext_fip: ext_fip.ip): - return - - floatingip = { - 'pool': None - } - floatingip.update(ctx.node.properties['floatingip'], **args) - - fip = nova_client.floating_ips.create(floatingip['pool']) - set_floatingip_runtime_properties(fip.id, fip.ip) - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - delete_floatingip(nova_client) - - -@operation -@with_nova_client -def creation_validation(nova_client, **kwargs): - floatingip_creation_validation(nova_client, 'ip') diff --git a/nova_plugin/host_aggregate.py b/nova_plugin/host_aggregate.py deleted file mode 100644 index 5222603e..00000000 --- a/nova_plugin/host_aggregate.py +++ /dev/null @@ -1,171 +0,0 @@ -######### -# Copyright (c) 2018 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from cloudify import ctx -from cloudify.decorators import operation - -from openstack_plugin_common import (with_nova_client, - get_openstack_id, - get_property, - is_external_resource, - use_external_resource, - delete_resource_and_runtime_properties, - delete_runtime_properties, - create_object_dict, - add_list_to_runtime_properties, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS) - -HOST_AGGREGATE_OPENSTACK_TYPE = 'aggregate' -HOSTS_PROPERTY = 'hosts' -METADATA_PROPERTY = 'metadata' -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - - -def _add_hosts(ctx, nova_client, host_aggregate, hosts): - for host in hosts: - ctx.logger.debug( - 'Adding host {0} to aggregate {1}' - .format(host, host_aggregate) - ) - - nova_client.aggregates.add_host(host_aggregate, host) - - if HOSTS_PROPERTY in ctx.instance.runtime_properties: - hosts = list( - set(hosts + ctx.instance.runtime_properties[HOSTS_PROPERTY]) - ) - - ctx.instance.runtime_properties[HOSTS_PROPERTY] = hosts - - -def _set_metadata(ctx, nova_client, host_aggregate, kwargs): - metadata = get_property(ctx, METADATA_PROPERTY, kwargs, {}) - - if metadata: - ctx.logger.debug( - 'Setting metadata {0} for aggregate {1}' - .format(metadata, host_aggregate) - ) - - nova_client.aggregates.set_metadata(host_aggregate, metadata) - - -def _remove_hosts(ctx, nova_client, host_aggregate_id, hosts): - for host in hosts: - ctx.logger.debug( - 'Removing host {0} from aggregate {1}' - .format(host, host_aggregate_id) - ) - nova_client.aggregates.remove_host(host_aggregate_id, host) - - current_hosts = [ - host - for host in ctx.instance.runtime_properties.get(HOSTS_PROPERTY, []) - if host not in hosts - ] - - if current_hosts: - ctx.instance.runtime_properties[HOSTS_PROPERTY] = current_hosts - else: - delete_runtime_properties(ctx, HOSTS_PROPERTY) - - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - if 
use_external_resource(ctx, nova_client, HOST_AGGREGATE_OPENSTACK_TYPE): - return - - host_aggregate_dict = create_object_dict( - ctx, - HOST_AGGREGATE_OPENSTACK_TYPE, - args - ) - - host_aggregate = nova_client.aggregates.create(**host_aggregate_dict) - hosts = get_property(ctx, HOSTS_PROPERTY, args, []) - _add_hosts(ctx, nova_client, host_aggregate, hosts) - _set_metadata(ctx, nova_client, host_aggregate, args) - - set_openstack_runtime_properties( - ctx, - host_aggregate, - HOST_AGGREGATE_OPENSTACK_TYPE - ) - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - if not is_external_resource(ctx): - host_aggregate = nova_client.aggregates.get(get_openstack_id(ctx)) - _remove_hosts( - ctx, - nova_client, - get_openstack_id(ctx), - host_aggregate.hosts - ) - - if HOSTS_PROPERTY in ctx.instance.runtime_properties: - ctx.instance.runtime_properties.pop(HOSTS_PROPERTY, None) - - delete_resource_and_runtime_properties( - ctx, - nova_client, - RUNTIME_PROPERTIES_KEYS - ) - - -@operation -@with_nova_client -def update(nova_client, args, **kwargs): - if HOST_AGGREGATE_OPENSTACK_TYPE in args: - host_aggregate = nova_client.aggregates.update( - get_openstack_id(ctx), - args.get(HOST_AGGREGATE_OPENSTACK_TYPE) - ) - - set_openstack_runtime_properties( - ctx, - host_aggregate, - HOST_AGGREGATE_OPENSTACK_TYPE - ) - - _set_metadata(ctx, nova_client, get_openstack_id(ctx), args) - - -@operation -@with_nova_client -def list_host_aggregates(nova_client, **kwargs): - host_aggregates_list = nova_client.aggregates.list() - - add_list_to_runtime_properties( - ctx, - HOST_AGGREGATE_OPENSTACK_TYPE, - host_aggregates_list - ) - - -@operation -@with_nova_client -def add_hosts(nova_client, hosts, **kwargs): - _add_hosts(ctx, nova_client, get_openstack_id(ctx), hosts) - - -@operation -@with_nova_client -def remove_hosts(nova_client, hosts, **kwargs): - _remove_hosts(ctx, nova_client, get_openstack_id(ctx), hosts) diff --git a/nova_plugin/keypair.py b/nova_plugin/keypair.py deleted file mode 100644 index 1407a9be..00000000 --- a/nova_plugin/keypair.py +++ /dev/null @@ -1,200 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
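
The host-aggregate operations in `host_aggregate.py` above boil down to three novaclient calls: create the aggregate, add hosts to it one at a time, then set its metadata. A minimal sketch of that sequence, assuming `nova_client` is an already-authenticated novaclient instance; the aggregate name, zone, host names, and metadata in the usage comment are placeholders.

```python
def create_aggregate_with_hosts(nova_client, name, availability_zone,
                                hosts=None, metadata=None):
    # Create the aggregate first, then attach hosts and metadata one call
    # at a time, mirroring nova_plugin.host_aggregate.create above.
    aggregate = nova_client.aggregates.create(name, availability_zone)
    for host in hosts or []:
        nova_client.aggregates.add_host(aggregate, host)
    if metadata:
        nova_client.aggregates.set_metadata(aggregate, metadata)
    return aggregate


# Usage (placeholder values):
# create_aggregate_with_hosts(nova, 'ssd-nodes', 'nova',
#                             hosts=['compute-1'], metadata={'ssd': 'true'})
```
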
- -import os -import errno -from getpass import getuser - -from cloudify import ctx -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError -from openstack_plugin_common import ( - with_nova_client, - validate_resource, - use_external_resource, - create_object_dict, - is_external_resource, - is_external_resource_not_conditionally_created, - delete_runtime_properties, - get_openstack_id, - add_list_to_runtime_properties, - delete_resource_and_runtime_properties, - set_openstack_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS -KEYPAIR_OPENSTACK_TYPE = 'keypair' - -PRIVATE_KEY_PATH_PROP = 'private_key_path' - - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - - private_key_path = _get_private_key_path() - pk_exists = _check_private_key_exists(private_key_path) - - if use_external_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE): - if not pk_exists: - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - raise NonRecoverableError( - 'Failed to use external keypair (node {0}): the public key {1}' - ' is available on Openstack, but the private key could not be ' - 'found at {2}'.format(ctx.node.id, - ctx.node.properties['resource_id'], - private_key_path)) - return - - if pk_exists: - raise NonRecoverableError( - "Can't create keypair - private key path already exists: {0}" - .format(private_key_path)) - - keypair = create_object_dict(ctx, KEYPAIR_OPENSTACK_TYPE, args, {}) - - keypair = nova_client.keypairs.create(keypair['name'], - keypair.get('public_key')) - - set_openstack_runtime_properties(ctx, keypair, KEYPAIR_OPENSTACK_TYPE) - - try: - # write private key file - _mkdir_p(os.path.dirname(private_key_path)) - with open(private_key_path, 'w') as f: - f.write(keypair.private_key) - os.chmod(private_key_path, 0600) - except Exception: - _delete_private_key_file() - delete_resource_and_runtime_properties(ctx, nova_client, - RUNTIME_PROPERTIES_KEYS) - raise - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - if not is_external_resource(ctx): - ctx.logger.info('deleting keypair') - - _delete_private_key_file() - - nova_client.keypairs.delete(get_openstack_id(ctx)) - else: - ctx.logger.info('not deleting keypair since an external keypair is ' - 'being used') - - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - - -@with_nova_client -def list_keypairs(nova_client, args, **kwargs): - keypair_list = nova_client.keypairs.list(**args) - add_list_to_runtime_properties(ctx, KEYPAIR_OPENSTACK_TYPE, keypair_list) - - -@operation -@with_nova_client -def creation_validation(nova_client, **kwargs): - - def validate_private_key_permissions(private_key_path): - ctx.logger.debug('checking whether private key file {0} has the ' - 'correct permissions'.format(private_key_path)) - if not os.access(private_key_path, os.R_OK): - err = 'private key file {0} is not readable'\ - .format(private_key_path) - ctx.logger.error('VALIDATION ERROR: ' + err) - raise NonRecoverableError(err) - ctx.logger.debug('OK: private key file {0} has the correct ' - 'permissions'.format(private_key_path)) - - def validate_path_owner(path): - ctx.logger.debug('checking whether directory {0} is owned by the ' - 'current user'.format(path)) - from pwd import getpwnam, getpwuid - - user = getuser() - owner = getpwuid(os.stat(path).st_uid).pw_name - current_user_id = str(getpwnam(user).pw_uid) - owner_id = str(os.stat(path).st_uid) - - if not current_user_id == owner_id: - err 
= '{0} is not owned by the current user (it is owned by {1})'\ - .format(path, owner) - ctx.logger.warning('VALIDATION WARNING: {0}'.format(err)) - return - ctx.logger.debug('OK: {0} is owned by the current user'.format(path)) - - validate_resource(ctx, nova_client, KEYPAIR_OPENSTACK_TYPE) - - private_key_path = _get_private_key_path() - pk_exists = _check_private_key_exists(private_key_path) - - if is_external_resource_not_conditionally_created(ctx): - if pk_exists: - if os.name == 'posix': - validate_private_key_permissions(private_key_path) - validate_path_owner(private_key_path) - else: - err = "can't use external keypair: the public key {0} is " \ - "available on Openstack, but the private key could not be " \ - "found at {1}".format(ctx.node.properties['resource_id'], - private_key_path) - ctx.logger.error('VALIDATION ERROR: {0}'.format(err)) - raise NonRecoverableError(err) - else: - if pk_exists: - err = 'private key path already exists: {0}'.format( - private_key_path) - ctx.logger.error('VALIDATION ERROR: {0}'.format(err)) - raise NonRecoverableError(err) - else: - err = 'private key directory {0} is not writable' - while private_key_path: - if os.path.isdir(private_key_path): - if not os.access(private_key_path, os.W_OK | os.X_OK): - raise NonRecoverableError(err.format(private_key_path)) - else: - break - private_key_path, _ = os.path.split(private_key_path) - - ctx.logger.debug('OK: keypair configuration is valid') - - -def _get_private_key_path(): - return os.path.expanduser(ctx.node.properties[PRIVATE_KEY_PATH_PROP]) - - -def _delete_private_key_file(): - private_key_path = _get_private_key_path() - ctx.logger.debug('deleting private key file at {0}'.format( - private_key_path)) - try: - os.remove(private_key_path) - except OSError as e: - if e.errno == errno.ENOENT: - # file was already deleted somehow - pass - raise - - -def _check_private_key_exists(private_key_path): - return os.path.isfile(private_key_path) - - -def _mkdir_p(path): - if path and not os.path.isdir(path): - os.makedirs(path) diff --git a/nova_plugin/security_group.py b/nova_plugin/security_group.py deleted file mode 100644 index 283eae85..00000000 --- a/nova_plugin/security_group.py +++ /dev/null @@ -1,81 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
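
The keypair handling in `keypair.py` above hinges on one detail: when Nova generates the key pair, the private key is only present in the create response, so it must be written to disk immediately and locked down to the owner. A minimal sketch of that flow, assuming `nova_client` is an authenticated novaclient instance and `private_key_path` is caller-supplied.

```python
import os


def create_keypair_and_save_key(nova_client, name, private_key_path):
    # Nova generates the key pair; the response is the only place the
    # private key ever appears.
    keypair = nova_client.keypairs.create(name)

    key_dir = os.path.dirname(private_key_path)
    if key_dir and not os.path.isdir(key_dir):
        os.makedirs(key_dir)

    # Persist the one-time private key and restrict it to the owner,
    # mirroring the 0600 chmod in the deleted plugin code.
    with open(private_key_path, 'w') as key_file:
        key_file.write(keypair.private_key)
    os.chmod(private_key_path, 0o600)

    return keypair
```
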
- -from cloudify import ctx -from cloudify.decorators import operation -from openstack_plugin_common import ( - transform_resource_name, - with_nova_client, - delete_resource_and_runtime_properties -) -from openstack_plugin_common.security_group import ( - build_sg_data, - process_rules, - use_external_sg, - set_sg_runtime_properties, - delete_sg, - sg_creation_validation, - RUNTIME_PROPERTIES_KEYS -) - - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - - security_group = build_sg_data(args) - security_group['description'] = ctx.node.properties['description'] - - sgr_default_values = { - 'ip_protocol': 'tcp', - 'from_port': 1, - 'to_port': 65535, - 'cidr': '0.0.0.0/0', - # 'group_id': None, - # 'parent_group_id': None, - } - sg_rules = process_rules(nova_client, sgr_default_values, - 'cidr', 'group_id', 'from_port', 'to_port') - - if use_external_sg(nova_client): - return - - transform_resource_name(ctx, security_group) - - sg = nova_client.security_groups.create( - security_group['name'], security_group['description']) - - set_sg_runtime_properties(sg, nova_client) - - try: - for sgr in sg_rules: - sgr['parent_group_id'] = sg.id - nova_client.security_group_rules.create(**sgr) - except Exception: - delete_resource_and_runtime_properties(ctx, nova_client, - RUNTIME_PROPERTIES_KEYS) - raise - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - delete_sg(nova_client) - - -@operation -@with_nova_client -def creation_validation(nova_client, **kwargs): - sg_creation_validation(nova_client, 'cidr') diff --git a/nova_plugin/server.py b/nova_plugin/server.py deleted file mode 100644 index 61cfd3c5..00000000 --- a/nova_plugin/server.py +++ /dev/null @@ -1,1314 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
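
The legacy nova-net security group flow shown above creates the group first, then creates each rule with `parent_group_id` pointing at the new group, filling in defaults for any rule field the caller omits. A short sketch of that flow; the default values mirror the `sgr_default_values` dict in the deleted module, while the `nova_client` argument and the `rules` list are assumptions.

```python
def create_group_with_rules(nova_client, name, description, rules):
    # Defaults applied to every rule, matching sgr_default_values above.
    defaults = {
        'ip_protocol': 'tcp',
        'from_port': 1,
        'to_port': 65535,
        'cidr': '0.0.0.0/0',
    }

    sg = nova_client.security_groups.create(name, description)

    for rule in rules:
        # Caller-provided fields win over the defaults; every rule is
        # attached to the group that was just created.
        merged = dict(defaults, **rule)
        merged['parent_group_id'] = sg.id
        nova_client.security_group_rules.create(**merged)

    return sg
```
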
- - -import netaddr -import os -import time -import copy -import operator - -from novaclient import exceptions as nova_exceptions - -from cloudify import ctx -from cloudify.manager import get_rest_client -from cloudify.decorators import operation -from cloudify.exceptions import NonRecoverableError, RecoverableError -from cinder_plugin import volume -from openstack_plugin_common import ( - provider, - transform_resource_name, - get_resource_id, - get_openstack_id, - add_list_to_runtime_properties, - get_openstack_ids_of_connected_nodes_by_openstack_type, - with_glance_client, - with_nova_client, - with_cinder_client, - assign_payload_as_runtime_properties, - get_openstack_id_of_single_connected_node_by_openstack_type, - get_openstack_names_of_connected_nodes_by_openstack_type, - get_single_connected_node_by_openstack_type, - is_external_resource, - is_external_resource_by_properties, - is_external_resource_not_conditionally_created, - is_external_relationship_not_conditionally_created, - use_external_resource, - delete_runtime_properties, - is_external_relationship, - validate_resource, - USE_EXTERNAL_RESOURCE_PROPERTY, - OPENSTACK_AZ_PROPERTY, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_NAME_PROPERTY, - COMMON_RUNTIME_PROPERTIES_KEYS, - with_neutron_client) -from nova_plugin.keypair import KEYPAIR_OPENSTACK_TYPE -from nova_plugin.server_group import SERVER_GROUP_OPENSTACK_TYPE -from nova_plugin import userdata -from openstack_plugin_common.floatingip import (IP_ADDRESS_PROPERTY, - get_server_floating_ip) -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE -from neutron_plugin.port import PORT_OPENSTACK_TYPE -from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE, VOLUME_BOOTABLE -from openstack_plugin_common.security_group import \ - SECURITY_GROUP_OPENSTACK_TYPE -from glance_plugin.image import handle_image_from_relationship - -SERVER_OPENSTACK_TYPE = 'server' - -# server status constants. -# Full lists here: http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html # NOQA -SERVER_STATUS_ACTIVE = 'ACTIVE' -SERVER_STATUS_BUILD = 'BUILD' -SERVER_STATUS_SHUTOFF = 'SHUTOFF' -SERVER_STATUS_SUSPENDED = 'SUSPENDED' -SERVER_STATUS_ERROR = 'ERROR' -SERVER_STATUS_REBOOT = 'REBOOT' -SERVER_STATUS_HARD_REBOOT = 'HARD_REBOOT' -SERVER_STATUS_UNKNOWN = 'UNKNOWN' - -OS_EXT_STS_TASK_STATE = 'OS-EXT-STS:task_state' -SERVER_TASK_STATE_POWERING_ON = 'powering-on' -SERVER_TASK_STATE_POWERING_OFF = 'powering-off' - -MUST_SPECIFY_NETWORK_EXCEPTION_TEXT = 'More than one possible network found.' 
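
The status and task-state constants above are consumed together: the extended attribute name contains colons, so it has to be read with `getattr` rather than normal attribute access, and a server is only treated as settled when its status is `ACTIVE` and no task is in flight. A small illustrative wait loop under those assumptions; the timeout and interval values are arbitrary.

```python
import time

SERVER_STATUS_ACTIVE = 'ACTIVE'
OS_EXT_STS_TASK_STATE = 'OS-EXT-STS:task_state'


def wait_until_active(nova_client, server_id, timeout=300, interval=10):
    deadline = time.time() + timeout
    while time.time() < deadline:
        server = nova_client.servers.get(server_id)
        # The extended attribute name is not a valid identifier, hence getattr.
        task_state = getattr(server, OS_EXT_STS_TASK_STATE, None)
        if server.status == SERVER_STATUS_ACTIVE and task_state is None:
            return server
        time.sleep(interval)
    raise RuntimeError(
        'server {0} did not become ACTIVE in time'.format(server_id))
```
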
-SERVER_DELETE_CHECK_SLEEP = 2 - -# Runtime properties -NETWORKS_PROPERTY = 'networks' # all of the server's ips -IP_PROPERTY = 'ip' # the server's private ip -IPV4_PROPERTY = 'ipv4_address' -IPV6_PROPERTY = 'ipv6_address' -IPV4_LIST_PROPERTY = 'ipv4_addresses' -IPV6_LIST_PROPERTY = 'ipv6_addresses' -ADMIN_PASSWORD_PROPERTY = 'password' # the server's password -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \ - [NETWORKS_PROPERTY, IP_PROPERTY, ADMIN_PASSWORD_PROPERTY] - - -def _get_management_network_id_and_name(neutron_client, ctx): - """Examine the context to find the management network id and name.""" - management_network_id = None - management_network_name = \ - ctx.node.properties.get('management_network_name') - provider_context = provider(ctx) - - if management_network_name: - management_network_name = transform_resource_name( - ctx, management_network_name) - management_network_id = neutron_client.cosmo_get_named( - 'network', management_network_name) - management_network_id = management_network_id['id'] - else: - int_network = provider_context.int_network - if int_network: - management_network_id = int_network['id'] - management_network_name = int_network['name'] # Already transform. - - return management_network_id, management_network_name - - -def _merge_nics(management_network_id, *nics_sources): - """Merge nics_sources into a single nics list, insert mgmt network if - needed. - nics_sources are lists of networks received from several sources - (server properties, relationships to networks, relationships to ports). - Merge them into a single list, and if the management network isn't present - there, prepend it as the first network. - """ - merged = [] - for nics in nics_sources: - merged.extend(nics) - if management_network_id is not None and \ - not any(nic['net-id'] == management_network_id for nic in merged): - merged.insert(0, {'net-id': management_network_id}) - return merged - - -def _normalize_nics(nics): - """Transform the NICs passed to the form expected by openstack. - - If both net-id and port-id are provided, remove net-id: it is ignored - by openstack anyway. - """ - def _normalize(nic): - if 'port-id' in nic and 'net-id' in nic: - nic = nic.copy() - del nic['net-id'] - return nic - return [_normalize(nic) for nic in nics] - - -def _prepare_server_nics(neutron_client, ctx, server): - """Update server['nics'] based on declared relationships. - - server['nics'] should contain the pre-declared nics, then the networks - that the server has a declared relationship to, then the networks - of the ports the server has a relationship to. - - If that doesn't include the management network, it should be prepended - as the first network. - - The management network id and name are stored in the server meta properties - """ - network_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, PORT_OPENSTACK_TYPE) - management_network_id, management_network_name = \ - _get_management_network_id_and_name(neutron_client, ctx) - - if management_network_id or management_network_name: - ctx.logger.warning( - 'A management_network_name was provided ({0}), ' - 'however this node property is deprecated. ' - 'Instead, use a ' - 'cloudify.openstack.server_connected_to_port ' - 'relationship to a ' - 'cloudify.openstack.nodes.Port type ' - 'or a cloudify.relationships.depends_on ' - 'derived relationship to a ' - 'cloudify.openstack.nodes.Network type ' - 'node template. 
' - 'In Cloudify 3.4.x and above, relationships are ordered. ' - 'NICS on a Server are ordered according to ' - 'relationship order'.format(management_network_name)) - - port_networks = get_port_networks(neutron_client, port_ids) - - for port_network in port_networks: - for network_id in network_ids: - if network_id in port_network.get('net-id'): - network_ids.remove(network_id) - - nics = _merge_nics( - management_network_id, - server.get('nics', []), - [{'net-id': net_id} for net_id in network_ids], - port_networks) - - nics = _normalize_nics(nics) - - server['nics'] = nics - if management_network_id is not None: - server['meta']['cloudify_management_network_id'] = \ - management_network_id - if management_network_name is not None: - server['meta']['cloudify_management_network_name'] = \ - management_network_name - - -def _get_boot_volume_relationships(type_name, ctx): - ctx.logger.debug('Instance relationship target instances: {0}'.format(str([ - rel.target.instance.runtime_properties - for rel in ctx.instance.relationships]))) - targets = [ - rel.target.instance - for rel in ctx.instance.relationships - if rel.target.instance.runtime_properties.get( - OPENSTACK_TYPE_PROPERTY) == type_name and - rel.target.instance.runtime_properties.get(VOLUME_BOOTABLE, False)] - - if not targets: - return None - elif len(targets) > 1: - raise NonRecoverableError("2 boot volumes not supported") - return targets[0] - - -def _handle_boot_volume(server, ctx): - boot_volume = _get_boot_volume_relationships(VOLUME_OPENSTACK_TYPE, ctx) - if boot_volume: - boot_volume_id = boot_volume.runtime_properties[OPENSTACK_ID_PROPERTY] - ctx.logger.info('boot_volume_id: {0}'.format(boot_volume_id)) - # If a block device mapping already exists we shouldn't overwrite it - # completely - bdm = server.setdefault('block_device_mapping', {}) - bdm['vda'] = '{0}:::0'.format(boot_volume_id) - # Some nova configurations allow cross-az server-volume connections, so - # we can't treat that as an error. - if not server.get('availability_zone'): - server['availability_zone'] = \ - boot_volume.runtime_properties[OPENSTACK_AZ_PROPERTY] - - -@operation -@with_nova_client -@with_neutron_client -def create(nova_client, neutron_client, args, **kwargs): - """ - Creates a server. 
Exposes the parameters mentioned in - http://docs.openstack.org/developer/python-novaclient/api/novaclient.v1_1 - .servers.html#novaclient.v1_1.servers.ServerManager.create - """ - - external_server = use_external_resource(ctx, nova_client, - SERVER_OPENSTACK_TYPE) - - if external_server: - network_ids = \ - get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - port_ids = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, PORT_OPENSTACK_TYPE) - _validate_external_server_nics( - external_server, - network_ids, - port_ids - ) - # need to reload server for full list of networks - external_server = use_external_resource(ctx, nova_client, - SERVER_OPENSTACK_TYPE) - _set_network_and_ip_runtime_properties(external_server) - _validate_external_server_keypair(nova_client) - return - - provider_context = provider(ctx) - - def rename(name): - return transform_resource_name(ctx, name) - - server = { - 'name': get_resource_id(ctx, SERVER_OPENSTACK_TYPE), - } - server.update(copy.deepcopy(ctx.node.properties['server'])) - server.update(copy.deepcopy(args)) - - _handle_boot_volume(server, ctx) - handle_image_from_relationship(server, 'image', ctx) - - if 'meta' not in server: - server['meta'] = dict() - - transform_resource_name(ctx, server) - - ctx.logger.debug( - "server.create() server before transformations: {0}".format(server)) - - if ('block_device_mapping' in server or - 'block_device_mapping_v2' in server) \ - and 'image' not in server: - # python-novaclient requires an image field even if BDM is used. - server['image'] = ctx.node.properties.get('image') - else: - _handle_image_or_flavor(server, nova_client, 'image') - _handle_image_or_flavor(server, nova_client, 'flavor') - - if provider_context.agents_security_group: - security_groups = server.get('security_groups', []) - asg = provider_context.agents_security_group['name'] - if asg not in security_groups: - security_groups.append(asg) - server['security_groups'] = security_groups - elif not server.get('security_groups', []): - # Make sure that if the server is connected to a security group - # from CREATE time so that there the user can control - # that there is never a time that a running server is not protected. - security_group_names = \ - get_openstack_names_of_connected_nodes_by_openstack_type( - ctx, - SECURITY_GROUP_OPENSTACK_TYPE) - server['security_groups'] = security_group_names - - # server keypair handling - keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, KEYPAIR_OPENSTACK_TYPE, True) - - if 'key_name' in server: - if keypair_id: - raise NonRecoverableError("server can't both have the " - '"key_name" nested property and be ' - 'connected to a keypair via a ' - 'relationship at the same time') - server['key_name'] = rename(server['key_name']) - elif keypair_id: - server['key_name'] = _get_keypair_name_by_id(nova_client, keypair_id) - elif provider_context.agents_keypair: - server['key_name'] = provider_context.agents_keypair['name'] - else: - server['key_name'] = None - ctx.logger.info( - 'server must have a keypair, yet no keypair was connected to the ' - 'server node, the "key_name" nested property ' - "wasn't used, and there is no agent keypair in the provider " - "context. 
Agent installation can have issues.") - - _fail_on_missing_required_parameters( - server, - ('name', 'flavor'), - 'server') - - _prepare_server_nics(neutron_client, ctx, server) - - # server group handling - server_group_id = \ - get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, SERVER_GROUP_OPENSTACK_TYPE, True) - if server_group_id: - scheduler_hints = server.get('scheduler_hints', {}) - scheduler_hints['group'] = server_group_id - server['scheduler_hints'] = scheduler_hints - - ctx.logger.debug( - "server.create() server after transformations: {0}".format(server)) - - userdata.handle_userdata(server) - - ctx.logger.info("Creating VM with parameters: {0}".format(str(server))) - # Store the server dictionary contents in runtime properties - assign_payload_as_runtime_properties(ctx, SERVER_OPENSTACK_TYPE, server) - ctx.logger.debug( - "Asking Nova to create server. All possible parameters are: [{0}]" - .format(','.join(server.keys()))) - - try: - s = nova_client.servers.create(**server) - except nova_exceptions.BadRequest as e: - if 'Block Device Mapping is Invalid' in str(e): - return ctx.operation.retry( - message='Block Device Mapping is not created yet', - retry_after=30) - raise - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = s.id - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - SERVER_OPENSTACK_TYPE - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = server['name'] - - -def get_port_networks(neutron_client, port_ids): - - def get_network(port_id): - port = neutron_client.show_port(port_id) - return { - 'net-id': port['port']['network_id'], - 'port-id': port['port']['id'] - } - - return map(get_network, port_ids) - - -@operation -@with_nova_client -def start(nova_client, - start_retry_interval=30, - private_key_path='', - **kwargs): - - server = get_server_by_context(nova_client) - - if is_external_resource_not_conditionally_created(ctx): - ctx.logger.info('Validating external server is started') - if server.status != SERVER_STATUS_ACTIVE: - raise NonRecoverableError( - 'Expected external resource server {0} to be in ' - '"{1}" status'.format(server.id, SERVER_STATUS_ACTIVE)) - return - - if server.status == SERVER_STATUS_ACTIVE: - ctx.logger.info('Server is {0}'.format(server.status)) - - if ctx.node.properties['use_password']: - private_key = _get_private_key(private_key_path) - ctx.logger.debug('retrieving password for server') - password = server.get_password(private_key) - - if not password: - return ctx.operation.retry( - message='Waiting for server to post generated password', - retry_after=start_retry_interval) - - ctx.instance.runtime_properties[ADMIN_PASSWORD_PROPERTY] = password - ctx.logger.info('Server has been set with a password') - - _set_network_and_ip_runtime_properties(server) - return - - server_task_state = getattr(server, OS_EXT_STS_TASK_STATE) - - if server.status == SERVER_STATUS_SHUTOFF and \ - server_task_state != SERVER_TASK_STATE_POWERING_ON: - ctx.logger.info('Server is in {0} status - starting server...'.format( - SERVER_STATUS_SHUTOFF)) - server.start() - server_task_state = SERVER_TASK_STATE_POWERING_ON - - if server.status == SERVER_STATUS_BUILD or \ - server_task_state == SERVER_TASK_STATE_POWERING_ON: - return ctx.operation.retry( - message='Waiting for server to be in {0} state but is in {1}:{2} ' - 'state. 
Retrying...'.format(SERVER_STATUS_ACTIVE, - server.status, - server_task_state), - retry_after=start_retry_interval) - - try: - server_state_fault = server.fault['message'] - except (AttributeError, KeyError): - server_state_fault = \ - "{0}. A reason was not provided by Opentack.".format( - server_task_state) - - raise NonRecoverableError( - 'Unexpected server state {0}. Reason: {1}'.format( - server.status, - server_state_fault)) - - -@operation -@with_nova_client -def stop(nova_client, **kwargs): - """ - Stop server. - - Depends on OpenStack implementation, server.stop() might not be supported. - """ - if is_external_resource(ctx): - ctx.logger.info('Not stopping server since an external server is ' - 'being used') - return - - server = get_server_by_context(nova_client) - _server_stop(nova_client, server) - - -def _server_stop(nova_client, server): - server_task_state = getattr(server, OS_EXT_STS_TASK_STATE) - - if server_task_state == SERVER_TASK_STATE_POWERING_OFF: - return ctx.operation.retry( - message="Server is stopping", - retry_after=30) - elif server.status != SERVER_STATUS_SHUTOFF: - nova_client.servers.stop(server) - - # wait 10 seconds before next check - time.sleep(10) - - server = nova_client.servers.get(server.id) - if server.status != SERVER_STATUS_SHUTOFF: - return ctx.operation.retry( - message="Server has {} state." - .format(server.status), - retry_after=30) - else: - ctx.logger.info('Server is already stopped') - - -def _server_start(nova_client, server): - if server.status != SERVER_STATUS_ACTIVE: - nova_client.servers.start(server) - - # wait 10 seconds before next check - time.sleep(10) - - server = nova_client.servers.get(server.id) - if server.status != SERVER_STATUS_SHUTOFF: - return ctx.operation.retry( - message="Server has {} state." - .format(server.status), - retry_after=30) - else: - ctx.logger.info('Server is already started?') - - -@operation -@with_nova_client -def reboot(nova_client, reboot_type='soft', **kwargs): - - server = get_server_by_context(nova_client) - - if ctx.operation.retry_number == 0: - if reboot_type.upper() not in ['HARD', 'SOFT']: - raise NonRecoverableError( - 'Unexpected reboot type: {}. ' - 'Valid values: SOFT or HARD.'.format( - reboot_type)) - nova_client.servers.reboot(server, reboot_type.upper()) - - server = nova_client.servers.get(server.id) - - if server.status in [SERVER_STATUS_REBOOT, - SERVER_STATUS_HARD_REBOOT, - SERVER_STATUS_UNKNOWN]: - return ctx.operation.retry( - message="Server has {0} state. 
Waiting.".format( - server.status), - retry_after=30) - - elif server.status == SERVER_STATUS_ACTIVE: - ctx.logger.info( - 'Reboot operation finished in {} state.'.format( - server.status)) - - elif server.status == SERVER_STATUS_ERROR: - raise NonRecoverableError( - 'Reboot operation finished in {} state.'.format( - server.status)) - - else: - raise NonRecoverableError( - 'Reboot operation finished in unexpected state: {}'.format( - server.state)) - - -def _server_suspend(nova_client, server): - if server.status == SERVER_STATUS_ACTIVE: - nova_client.servers.suspend(server) - else: - ctx.logger.info('Server is already suspended?') - - -def _server_resume(nova_client, server): - if server.status == SERVER_STATUS_SUSPENDED: - nova_client.servers.resume(server) - else: - ctx.logger.info('Server is already resumed?') - - -def _get_snapshot_name(ctx, kwargs): - return "vm-{}-{}-{}".format( - get_openstack_id(ctx), kwargs["snapshot_name"], - "increment" if kwargs["snapshot_incremental"] else "backup" - ) - - -def _check_finished_upload(nova_client, server, waiting_list): - # check that we created images - ctx.logger.info("Check upload state....") - - server = nova_client.servers.get(server.id) - state = getattr(server, OS_EXT_STS_TASK_STATE) - if state not in waiting_list: - return - - return ctx.operation.retry( - message="Server has {}/{} state." - .format(server.status, state), - retry_after=30) - - -@operation -@with_nova_client -def freeze_suspend(nova_client, **kwargs): - """ - Create server backup. - """ - server = get_server_by_context(nova_client) - ctx.logger.info("Suspend VM {}".format(server.human_id)) - _server_suspend(nova_client, server) - - -@operation -@with_nova_client -def freeze_resume(nova_client, **kwargs): - """ - Create server backup. - """ - server = get_server_by_context(nova_client) - ctx.logger.info("Resume VM {}".format(server.human_id)) - _server_resume(nova_client, server) - - -@operation -@with_nova_client -@with_glance_client -def snapshot_create(nova_client, glance_client, **kwargs): - """ - Create server backup. - """ - server = get_server_by_context(nova_client) - - ctx.logger.info("Create snapshot for {}".format(server.human_id)) - - snapshot_name = _get_snapshot_name(ctx, kwargs) - snapshot_rotation = int(kwargs["snapshot_rotation"]) - snapshot_incremental = kwargs["snapshot_incremental"] - - image_id, _ = _get_image(glance_client, snapshot_name, - snapshot_incremental) - if image_id: - raise NonRecoverableError("Snapshot {} already exists." 
- .format(snapshot_name)) - - # check current state before upload - _check_finished_upload(nova_client, server, ['image_uploading']) - - # we save backupstate for get last state of creation - backupstate = ctx.instance.runtime_properties.get("backupstate") - if backupstate != snapshot_name: - if not snapshot_incremental: - server.backup(snapshot_name, kwargs["snapshot_type"], - snapshot_rotation) - ctx.logger.info("Server backup {} creation started" - .format(repr(snapshot_name))) - else: - server.create_image(snapshot_name) - ctx.logger.info("Server snapshot {} creation started" - .format(repr(snapshot_name))) - ctx.instance.runtime_properties["backupstate"] = snapshot_name - - # wait for finish upload - _check_finished_upload(nova_client, server, ['image_uploading']) - ctx.instance.runtime_properties["backupstate"] = "done" - - -def _get_image(glance_client, snapshot_name, snapshot_incremental): - backtype = 'snapshot' if snapshot_incremental else 'backup' - - for image in glance_client.images.list(filters={"name": snapshot_name}): - ctx.logger.info("Found image {}".format(repr(image))) - if image['name'] != snapshot_name: - continue - - if image['image_type'] != backtype: - continue - - return image['id'], image['status'] - return None, None - - -@operation -@with_nova_client -@with_glance_client -def snapshot_apply(nova_client, glance_client, **kwargs): - """ - Create server backup. - """ - server = get_server_by_context(nova_client) - snapshot_name = _get_snapshot_name(ctx, kwargs) - - snapshot_incremental = kwargs["snapshot_incremental"] - - if snapshot_incremental: - ctx.logger.info("Apply snapshot {} for {}" - .format(snapshot_name, server.human_id)) - else: - ctx.logger.info("Apply backup {} for {}" - .format(snapshot_name, server.human_id)) - - image_id, _ = _get_image(glance_client, snapshot_name, - snapshot_incremental) - if not image_id: - raise NonRecoverableError("No snapshots found with name: {}." - .format(snapshot_name)) - - _check_finished_upload(nova_client, server, ['image_uploading', - 'rebuild_spawning']) - - restorestate = ctx.instance.runtime_properties.get("restorestate") - if restorestate != snapshot_name: - # we stop before restore - _server_stop(nova_client, server) - - ctx.logger.info("Rebuild {} with {}" - .format(server.human_id, snapshot_name)) - server.rebuild(image_id) - ctx.instance.runtime_properties["restorestate"] = snapshot_name - - # we have applied backup so we can start instance - server = nova_client.servers.get(server.id) - _check_finished_upload(nova_client, server, ['rebuild_spawning']) - - _server_start(nova_client, server) - ctx.instance.runtime_properties["restorestate"] = "done" - - -def _image_delete(glance_client, snapshot_name, snapshot_incremental): - image_id, status = _get_image(glance_client, snapshot_name, - snapshot_incremental) - if not image_id: - ctx.logger.info("No snapshots found with name: {}." - .format(snapshot_name)) - return - - if status == 'active': - glance_client.images.delete(image_id) - time.sleep(10) - - # check that we deleted any backups with such name - image_id, _ = _get_image(glance_client, snapshot_name, - snapshot_incremental) - if image_id: - return ctx.operation.retry(message='{} is still alive' - .format(image_id), - retry_after=30) - - -@operation -@with_nova_client -@with_glance_client -def snapshot_delete(nova_client, glance_client, **kwargs): - """ - Delete server backup. 
- """ - server = get_server_by_context(nova_client) - snapshot_name = _get_snapshot_name(ctx, kwargs) - - snapshot_incremental = kwargs["snapshot_incremental"] - - if snapshot_incremental: - ctx.logger.info("Remove snapshot {} for {}" - .format(snapshot_name, server.human_id)) - else: - ctx.logger.info("Remove backup {} for {}" - .format(snapshot_name, server.human_id)) - - return _image_delete(glance_client, snapshot_name, snapshot_incremental) - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - if not is_external_resource(ctx): - ctx.logger.info('deleting server') - server = get_server_by_context(nova_client) - nova_client.servers.delete(server) - _wait_for_server_to_be_deleted(nova_client, server) - else: - ctx.logger.info('not deleting server since an external server is ' - 'being used') - - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - - -@operation -@with_nova_client -def list_servers(nova_client, args, **kwargs): - server_list = nova_client.servers.list(**args) - add_list_to_runtime_properties(ctx, SERVER_OPENSTACK_TYPE, server_list) - - -def _wait_for_server_to_be_deleted(nova_client, - server_id, - timeout=120, - sleep_interval=5): - wait_time = time.time() + timeout - while time.time() < wait_time: - try: - server = nova_client.servers.get(server_id) - ctx.logger.debug('Waiting for server "{}" to be deleted. current' - ' status: {}'.format(server.id, server.status)) - time.sleep(sleep_interval) - except nova_exceptions.NotFound: - return - # recoverable error - raise RuntimeError('Server {} has not been deleted. waited for {} seconds' - .format(server_id, timeout)) - - -def get_server_by_context(nova_client): - return nova_client.servers.get(get_openstack_id(ctx)) - - -def _set_network_and_ip_runtime_properties(server): - - ips = {} - - if not server.networks: - raise NonRecoverableError( - 'The server was created but not attached to a network. ' - 'Cloudify requires that a server is connected to ' - 'at least one port.' 
- ) - - manager_network_ip = None - management_network_name = server.metadata.get( - 'cloudify_management_network_name') - - ipv4_addrs = [] - ipv6_addrs = [] - for network, network_ips in server.networks.items(): - if (management_network_name and - network == management_network_name) or not \ - manager_network_ip: - manager_network_ip = next(iter(network_ips or []), None) - ips[network] = network_ips - ipv4_addrs = list(set( - ipv4_addrs + [ip for ip in network_ips if netaddr.valid_ipv4(ip)])) - ipv6_addrs = list(set( - ipv6_addrs + [ip for ip in network_ips if netaddr.valid_ipv6(ip)])) - - ctx.instance.runtime_properties[NETWORKS_PROPERTY] = ips - ctx.instance.runtime_properties[IPV4_LIST_PROPERTY] = ipv4_addrs - ctx.instance.runtime_properties[IPV6_LIST_PROPERTY] = ipv6_addrs - ctx.instance.runtime_properties[IP_PROPERTY] = manager_network_ip - if server.accessIPv4: - ctx.instance.runtime_properties[IPV4_PROPERTY] = server.accessIPv4 - elif netaddr.valid_ipv4(manager_network_ip): - ctx.instance.runtime_properties[IPV4_PROPERTY] = manager_network_ip - elif len(ipv4_addrs) == 1: - ctx.instance.runtime_properties[IPV4_PROPERTY] = ipv4_addrs[0] - else: - ctx.instance.runtime_properties[IPV4_PROPERTY] = None - if server.accessIPv6: - ctx.instance.runtime_properties[IPV6_PROPERTY] = server.accessIPv6 - elif netaddr.valid_ipv6(manager_network_ip): - ctx.instance.runtime_properties[IPV6_PROPERTY] = manager_network_ip - elif len(ipv6_addrs) == 1: - ctx.instance.runtime_properties[IPV6_PROPERTY] = ipv6_addrs[0] - else: - ctx.instance.runtime_properties[IPV6_PROPERTY] = None - - -@operation -@with_nova_client -def connect_floatingip(nova_client, fixed_ip='', **kwargs): - server_id = get_openstack_id(ctx.source) - floating_ip_id = get_openstack_id(ctx.target) - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info('Validating external floatingip and server ' - 'are associated') - if nova_client.floating_ips.get(floating_ip_id).instance_id ==\ - server_id: - return - raise NonRecoverableError( - 'Expected external resources server {0} and floating-ip {1} to be ' - 'connected'.format(server_id, floating_ip_id)) - - floating_ip_address = ctx.target.instance.runtime_properties[ - IP_ADDRESS_PROPERTY] - server = nova_client.servers.get(server_id) - server.add_floating_ip(floating_ip_address, fixed_ip or None) - - server = nova_client.servers.get(server_id) - all_server_ips = reduce(operator.add, server.networks.values()) - if floating_ip_address not in all_server_ips: - return ctx.operation.retry(message='Failed to assign floating ip {0}' - ' to machine {1}.' 
- .format(floating_ip_address, server_id)) - - -@operation -@with_nova_client -@with_neutron_client -def disconnect_floatingip(nova_client, neutron_client, **kwargs): - if is_external_relationship(ctx): - ctx.logger.info('Not disassociating floatingip and server since ' - 'external floatingip and server are being used') - return - - server_id = get_openstack_id(ctx.source) - ctx.logger.info("Remove floating ip {0}".format( - ctx.target.instance.runtime_properties[IP_ADDRESS_PROPERTY])) - server_floating_ip = get_server_floating_ip(neutron_client, server_id) - if server_floating_ip: - server = nova_client.servers.get(server_id) - server.remove_floating_ip(server_floating_ip['floating_ip_address']) - ctx.logger.info("Floating ip {0} detached from server" - .format(server_floating_ip['floating_ip_address'])) - - -@operation -@with_nova_client -def connect_security_group(nova_client, **kwargs): - server_id = get_openstack_id(ctx.source) - security_group_id = get_openstack_id(ctx.target) - security_group_name = ctx.target.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY] - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info('Validating external security group and server ' - 'are associated') - server = nova_client.servers.get(server_id) - if [sg for sg in server.list_security_group() if sg.id == - security_group_id]: - return - raise NonRecoverableError( - 'Expected external resources server {0} and security-group {1} to ' - 'be connected'.format(server_id, security_group_id)) - - def group_matches(security_group): - return ( - security_group_id == security_group.id or - security_group_name == security_group.name - ) - # Since some security groups are already attached in - # create this will ensure that they are not attached twice. 
- server = nova_client.servers.get(server_id) - present = any(map(group_matches, server.list_security_group())) - # to support nova security groups as well, - # we connect the security group by name - # (as connecting by id - # doesn't seem to work well for nova SGs) - if not present: - server.add_security_group(security_group_name) - - _validate_security_group_and_server_connection_status(nova_client, - server_id, - security_group_id, - security_group_name, - is_connected=True) - - -@operation -@with_nova_client -def disconnect_security_group(nova_client, **kwargs): - if is_external_relationship(ctx): - ctx.logger.info('Not disconnecting security group and server since ' - 'external security group and server are being used') - return - - server_id = get_openstack_id(ctx.source) - security_group_id = get_openstack_id(ctx.target) - security_group_name = ctx.target.instance.runtime_properties[ - OPENSTACK_NAME_PROPERTY] - server = nova_client.servers.get(server_id) - # to support nova security groups as well, we disconnect the security group - # by name (as disconnecting by id doesn't seem to work well for nova SGs) - try: - server.remove_security_group(security_group_name) - except nova_exceptions.NotFound: - ctx.logger.warn("Security group '{0}' (id: {1}) is not attached " - "to server instance {2}; skipping" - .format(security_group_name, security_group_id, - server_id)) - else: - _validate_security_group_and_server_connection_status( - nova_client, server_id, security_group_id, security_group_name, - is_connected=False) - - -@operation -@with_nova_client -@with_cinder_client -def attach_volume(nova_client, - cinder_client, - status_attempts=10, - status_timeout=2, - **kwargs): - - server_id = get_openstack_id(ctx.target) - volume_id = get_openstack_id(ctx.source) - - if is_external_relationship_not_conditionally_created(ctx): - ctx.logger.info('Validating external volume and server ' - 'are connected') - attachment = volume.get_attachment(cinder_client=cinder_client, - volume_id=volume_id, - server_id=server_id) - if attachment: - return - else: - raise NonRecoverableError( - 'Expected external resources server {0} and volume {1} to be ' - 'connected'.format(server_id, volume_id)) - - # Note: The 'device_name' property should actually be a property of the - # relationship between a server and a volume; It'll move to that - # relationship type once relationship properties are better supported. 
- device = ctx.source.node.properties[volume.DEVICE_NAME_PROPERTY] - nova_client.volumes.create_server_volume( - server_id, - volume_id, - device if device != 'auto' else None) - try: - vol, wait_succeeded = volume.wait_until_status( - cinder_client=cinder_client, - volume_id=volume_id, - status=volume.VOLUME_STATUS_IN_USE, - num_tries=status_attempts, - timeout=status_timeout - ) - if not wait_succeeded: - raise RecoverableError( - 'Waiting for volume status {0} failed - detaching volume and ' - 'retrying..'.format(volume.VOLUME_STATUS_IN_USE)) - if device == 'auto': - # The device name was assigned automatically so we - # query the actual device name - attachment = volume.get_attachment( - cinder_client=cinder_client, - volume_id=volume_id, - server_id=server_id - ) - device_name = attachment['device'] - ctx.logger.info('Detected device name for attachment of volume ' - '{0} to server {1}: {2}' - .format(volume_id, server_id, device_name)) - ctx.source.instance.runtime_properties[ - volume.DEVICE_NAME_PROPERTY] = device_name - except Exception as e: - if not isinstance(e, NonRecoverableError): - _prepare_attach_volume_to_be_repeated( - nova_client, cinder_client, server_id, volume_id, - status_attempts, status_timeout) - raise - - -def _prepare_attach_volume_to_be_repeated( - nova_client, cinder_client, server_id, volume_id, - status_attempts, status_timeout): - - ctx.logger.info('Cleaning after a failed attach_volume() call') - try: - _detach_volume(nova_client, cinder_client, server_id, volume_id, - status_attempts, status_timeout) - except Exception as e: - ctx.logger.error('Cleaning after a failed attach_volume() call failed ' - 'raising a \'{0}\' exception.'.format(e)) - raise NonRecoverableError(e) - - -def _detach_volume(nova_client, cinder_client, server_id, volume_id, - status_attempts, status_timeout): - attachment = volume.get_attachment(cinder_client=cinder_client, - volume_id=volume_id, - server_id=server_id) - if attachment: - nova_client.volumes.delete_server_volume(server_id, attachment['id']) - volume.wait_until_status(cinder_client=cinder_client, - volume_id=volume_id, - status=volume.VOLUME_STATUS_AVAILABLE, - num_tries=status_attempts, - timeout=status_timeout) - - -@operation -@with_nova_client -@with_cinder_client -def detach_volume(nova_client, - cinder_client, - status_attempts=10, - status_timeout=2, - **kwargs): - - if is_external_relationship(ctx): - ctx.logger.info('Not detaching volume from server since ' - 'external volume and server are being used') - return - - server_id = get_openstack_id(ctx.target) - volume_id = get_openstack_id(ctx.source) - - _detach_volume(nova_client, cinder_client, server_id, volume_id, - status_attempts, status_timeout) - - -def _fail_on_missing_required_parameters(obj, required_parameters, hint_where): - for k in required_parameters: - if k not in obj: - raise NonRecoverableError( - "Required parameter '{0}' is missing (under host's " - "properties.{1}). 
Required parameters are: {2}" - .format(k, hint_where, required_parameters)) - - -def _validate_external_server_keypair(nova_client): - keypair_id = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, KEYPAIR_OPENSTACK_TYPE, True) - if not keypair_id: - return - - keypair_instance_id = \ - [node_instance_id for node_instance_id, runtime_props in - ctx.capabilities.get_all().iteritems() if - runtime_props.get(OPENSTACK_ID_PROPERTY) == keypair_id][0] - keypair_node_properties = _get_properties_by_node_instance_id( - keypair_instance_id) - if not is_external_resource_by_properties(keypair_node_properties): - raise NonRecoverableError( - "Can't connect a new keypair node to a server node " - "with '{0}'=True".format(USE_EXTERNAL_RESOURCE_PROPERTY)) - - server = get_server_by_context(nova_client) - if keypair_id == _get_keypair_name_by_id(nova_client, server.key_name): - return - raise NonRecoverableError( - "Expected external resources server {0} and keypair {1} to be " - "connected".format(server.id, keypair_id)) - - -def _get_keypair_name_by_id(nova_client, key_name): - keypair = nova_client.cosmo_get_named(KEYPAIR_OPENSTACK_TYPE, key_name) - return keypair.id - - -def _validate_external_server_nics(external_server, network_ids, port_ids): - # check currently attached ports - interfaces = external_server.interface_list() - attached_ports = set([interface.port_id for interface in interfaces]) - attached_nets = set([interface.net_id for interface in interfaces]) - - already_attached_ports = [port_id for port_id in attached_ports - if port_id in port_ids] - already_attached_nets = [net_id for net_id in attached_nets - if net_id in network_ids] - if already_attached_ports or already_attached_nets: - raise NonRecoverableError( - 'Several ports/networks already connected to external server ' - '{0}: Networks - {1}; Ports - {2}' - .format(external_server.human_id, - already_attached_nets, - already_attached_ports)) - - # attach ports - for port_id in port_ids: - ctx.logger.info('Attaching port {0}...'.format(port_id)) - external_server.interface_attach(port_id=port_id, net_id=None, - fixed_ip=None) - ctx.logger.info( - 'Successfully attached port {0} to device (server) id {1}.' - .format(port_id, external_server.human_id)) - - # check currently attached networks, can be attached by port - attached_nets = [interface.net_id - for interface in external_server.interface_list()] - for net_id in network_ids: - if net_id not in attached_nets: - ctx.logger.info('Attaching network {0}...'.format(net_id)) - external_server.interface_attach(port_id=None, net_id=net_id, - fixed_ip=None) - ctx.logger.info( - 'Successfully attached network {0} to device (server) id {1}.' - .format(net_id, external_server.human_id)) - else: - ctx.logger.info( - 'Skipping network {0} attachment, because it is already ' - 'attached to device (server) id {1}.' 
- .format(net_id, external_server.human_id)) - - -def _get_properties_by_node_instance_id(node_instance_id): - if ctx._local: - instance = ctx._endpoint.get_node_instance(node_instance_id) - node = ctx._endpoint.get_node(instance.node_id) - return node.properties - else: - client = get_rest_client() - node_instance = client.node_instances.get(node_instance_id) - node = client.nodes.get(ctx.deployment.id, node_instance.node_id) - return node.properties - - -@operation -@with_nova_client -def creation_validation(nova_client, args, **kwargs): - - def validate_server_property_value_exists(server_props, property_name): - ctx.logger.debug( - 'checking whether {0} exists...'.format(property_name)) - - serv_props_copy = server_props.copy() - try: - handle_image_from_relationship(serv_props_copy, 'image', ctx) - _handle_image_or_flavor(serv_props_copy, nova_client, - property_name) - except (NonRecoverableError, nova_exceptions.NotFound) as e: - # temporary error - once image/flavor_name get removed, these - # errors won't be relevant anymore - err = str(e) - ctx.logger.error('VALIDATION ERROR: ' + err) - raise NonRecoverableError(err) - - prop_value_id = str(serv_props_copy[property_name]) - prop_values = list(nova_client.cosmo_list(property_name)) - for f in prop_values: - if prop_value_id == f.id: - ctx.logger.debug('OK: {0} exists'.format(property_name)) - return - err = '{0} {1} does not exist'.format(property_name, prop_value_id) - ctx.logger.error('VALIDATION ERROR: ' + err) - if prop_values: - ctx.logger.info('list of available {0}s:'.format(property_name)) - for f in prop_values: - ctx.logger.info(' {0:>10} - {1}'.format(f.id, f.name)) - else: - ctx.logger.info('there are no available {0}s'.format( - property_name)) - raise NonRecoverableError(err) - - validate_resource(ctx, nova_client, SERVER_OPENSTACK_TYPE) - - server_props = dict(ctx.node.properties['server'], **args) - validate_server_property_value_exists(server_props, 'flavor') - - -def _get_private_key(private_key_path): - pk_node_by_rel = \ - get_single_connected_node_by_openstack_type( - ctx, KEYPAIR_OPENSTACK_TYPE, True) - - if private_key_path: - if pk_node_by_rel: - raise NonRecoverableError("server can't both have a " - '"private_key_path" input and be ' - 'connected to a keypair via a ' - 'relationship at the same time') - key_path = private_key_path - else: - if pk_node_by_rel and pk_node_by_rel.properties['private_key_path']: - key_path = pk_node_by_rel.properties['private_key_path'] - else: - key_path = ctx.bootstrap_context.cloudify_agent.agent_key_path - - if key_path: - key_path = os.path.expanduser(key_path) - if os.path.isfile(key_path): - return key_path - - err_message = 'Cannot find private key file' - if key_path: - err_message += '; expected file path was {0}'.format(key_path) - raise NonRecoverableError(err_message) - - -def _validate_security_group_and_server_connection_status( - nova_client, server_id, sg_id, sg_name, is_connected): - - # verifying the security group got connected or disconnected - # successfully - this is due to Openstack concurrency issues that may - # take place when attempting to connect/disconnect multiple SGs to the - # same server at the same time - server = nova_client.servers.get(server_id) - - if is_connected ^ any(sg for sg in server.list_security_group() if - sg.id == sg_id): - raise RecoverableError( - message='Security group {0} did not get {2} server {1} ' - 'properly' - .format( - sg_name, - server.name, - 'connected to' if is_connected else 'disconnected from')) - - -def 
_handle_image_or_flavor(server, nova_client, prop_name): - if prop_name not in server and '{0}_name'.format(prop_name) not in server: - # setting image or flavor - looking it up by name; if not found, then - # the value is assumed to be the id - server[prop_name] = ctx.node.properties[prop_name] - - # temporary error message: once the 'image' and 'flavor' properties - # become mandatory, this will become less relevant - if not server[prop_name]: - raise NonRecoverableError( - 'must set {0} by either setting a "{0}" property or by setting' - ' a "{0}" or "{0}_name" (deprecated) field under the "server" ' - 'property'.format(prop_name)) - - image_or_flavor = \ - nova_client.cosmo_get_if_exists(prop_name, name=server[prop_name]) - if image_or_flavor: - server[prop_name] = image_or_flavor.id - else: # Deprecated sugar - if '{0}_name'.format(prop_name) in server: - prop_name_plural = nova_client.cosmo_plural(prop_name) - server[prop_name] = \ - getattr(nova_client, prop_name_plural).find( - name=server['{0}_name'.format(prop_name)]).id - del server['{0}_name'.format(prop_name)] diff --git a/nova_plugin/server_group.py b/nova_plugin/server_group.py deleted file mode 100644 index 4aefdda8..00000000 --- a/nova_plugin/server_group.py +++ /dev/null @@ -1,80 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
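For reference, the name-or-id resolution performed by the removed `_handle_image_or_flavor` helper above reduces to the pattern below. This is a minimal sketch using plain novaclient `find`/`get` calls rather than the plugin's own `cosmo_*` wrappers, so the exact lookup calls are illustrative only.

```python
from novaclient import exceptions as nova_exceptions


def resolve_flavor(nova_client, value):
    """Resolve a flavor reference that may be either a name or an ID."""
    try:
        # find() raises NotFound when no flavor carries this name.
        return nova_client.flavors.find(name=value).id
    except nova_exceptions.NotFound:
        # Fall back to treating the value as an ID; get() validates it.
        return nova_client.flavors.get(value).id
```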
- -from cloudify import ctx -from cloudify.decorators import operation -from openstack_plugin_common import ( - with_nova_client, - validate_resource, - use_external_resource, - is_external_resource, - delete_runtime_properties, - get_openstack_id, - set_openstack_runtime_properties, - create_object_dict, - add_list_to_runtime_properties, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS -SERVER_GROUP_OPENSTACK_TYPE = 'server_group' - - -@operation -@with_nova_client -def create(nova_client, args, **kwargs): - if use_external_resource(ctx, nova_client, SERVER_GROUP_OPENSTACK_TYPE): - return - - server_grp = create_object_dict( - ctx, - SERVER_GROUP_OPENSTACK_TYPE, - args, - {'policies': [ctx.node.properties['policy']]}) - - server_grp = nova_client.server_groups.create(**server_grp) - set_openstack_runtime_properties(ctx, - server_grp, - SERVER_GROUP_OPENSTACK_TYPE) - - -@operation -@with_nova_client -def delete(nova_client, **kwargs): - if not is_external_resource(ctx): - ctx.logger.info('deleting server group') - - nova_client.server_groups.delete(get_openstack_id(ctx)) - else: - ctx.logger.info('not deleting server group since an external server ' - 'group is being used') - - delete_runtime_properties(ctx, RUNTIME_PROPERTIES_KEYS) - - -@with_nova_client -def list_servergroups(nova_client, args, **kwargs): - server_group_list = nova_client.server_groups.list(**args) - add_list_to_runtime_properties(ctx, - SERVER_GROUP_OPENSTACK_TYPE, - server_group_list) - - -@operation -@with_nova_client -def creation_validation(nova_client, **kwargs): - validate_resource(ctx, nova_client, SERVER_GROUP_OPENSTACK_TYPE) - - ctx.logger.debug('OK: server group configuration is valid') diff --git a/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml b/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml deleted file mode 100644 index 22b7fb53..00000000 --- a/nova_plugin/tests/resources/test-keypair-validation-blueprint.yaml +++ /dev/null @@ -1,23 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml - - plugin.yaml - -inputs: - private_key: {} - is_keypair_external: {} - - -node_templates: - - keypair: - type: cloudify.openstack.nodes.KeyPair - properties: - private_key_path: { get_input: private_key } - use_external_resource: { get_input: is_keypair_external } - openstack_config: - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa diff --git a/nova_plugin/tests/resources/test-server-create-secgroup.yaml b/nova_plugin/tests/resources/test-server-create-secgroup.yaml deleted file mode 100644 index 70b75f6b..00000000 --- a/nova_plugin/tests/resources/test-server-create-secgroup.yaml +++ /dev/null @@ -1,31 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml - - plugin.yaml - -inputs: - use_password: - type: boolean - default: false - -node_templates: - - security_group: - type: cloudify.openstack.nodes.SecurityGroup - - server: - type: cloudify.openstack.nodes.Server - properties: - install_agent: false - use_password: { get_input: use_password } - openstack_config: - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa - server: - key_name: 'aa' - relationships: - - type: cloudify.openstack.server_connected_to_security_group - target: 
security_group diff --git a/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml b/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml deleted file mode 100644 index 275806cf..00000000 --- a/nova_plugin/tests/resources/test-start-operation-retry-blueprint.yaml +++ /dev/null @@ -1,31 +0,0 @@ -tosca_definitions_version: cloudify_dsl_1_3 - -imports: - - https://raw.githubusercontent.com/cloudify-cosmo/cloudify-manager/4.1/resources/rest-service/cloudify/types/types.yaml - - plugin.yaml - -inputs: - use_password: - type: boolean - default: false - -node_templates: - server: - type: cloudify.openstack.nodes.Server - properties: - install_agent: false - use_password: { get_input: use_password } - server: - key_name: key - scheduler_hints: - group: affinity-group-id - openstack_config: - username: aaa - password: aaa - tenant_name: aaa - auth_url: aaa - interfaces: - cloudify.interfaces.lifecycle: - start: - inputs: - start_retry_interval: 1 diff --git a/nova_plugin/tests/test_flavor.py b/nova_plugin/tests/test_flavor.py deleted file mode 100644 index 64a7b5c1..00000000 --- a/nova_plugin/tests/test_flavor.py +++ /dev/null @@ -1,248 +0,0 @@ -import mock -import unittest - -from cloudify.context import NODE_INSTANCE -from cloudify.context import BootstrapContext -from cloudify.state import current_ctx -import openstack_plugin_common.tests.test as common_test - -from cloudify.mocks import ( - MockContext, - MockNodeInstanceContext, - MockNodeContext -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_TYPE_PROPERTY -) -from nova_plugin.flavor import ( - FLAVOR_OPENSTACK_TYPE, - EXTRA_SPECS_PROPERTY, - TENANTS_PROPERTY -) -import nova_plugin - - -class TestFlavor(unittest.TestCase): - test_id = 'test-id' - test_name = 'test-name' - updated_name = 'updated-name' - test_deployment_id = 'test-deployment-id' - - class MockFlavorOS: - def __init__(self, id, name): - self._id = id - self._name = name - self._set_keys_called = False - self._set_keys_last_call_params = {} - - @property - def id(self): - return self._id - - @property - def name(self): - return self._name - - def set_keys(self, extra_specs): - self._set_keys_called = True - self._set_keys_last_call_params = extra_specs - - def assert_set_keys_called_with(self, test, params): - test.assertTrue(self._set_keys_called) - test.assertEquals(self._set_keys_last_call_params, params) - - def to_dict(self): - return {'name': self.name, 'id': self.id} - - def mock_nova_client(self, mock_flavor): - nova_client = mock.MagicMock() - nova_client.flavors.create.return_value = mock_flavor - nova_client.flavors.list.return_value = [mock_flavor] - nova_client.flavors.find.return_value = mock.MagicMock( - id=self.test_name - ) - nova_client.flavor_access.add_tenant_access = mock.MagicMock() - - return nova_client - - def mock_ctx(self, - test_vars, - test_id, - test_deployment_id, - runtime_properties=None): - ctx = MockContext() - - ctx.node = MockNodeContext(properties=test_vars) - ctx.bootstrap_context = BootstrapContext( - common_test.BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX[0] - ) - ctx.instance = MockNodeInstanceContext( - id=test_id, - runtime_properties=runtime_properties or {} - ) - ctx.deployment = mock.Mock() - ctx.deployment.id = test_deployment_id - ctx.type = NODE_INSTANCE - ctx.logger = mock.Mock() - - current_ctx.set(ctx) - return ctx - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def 
test_flavor_create_and_delete(self, *_): - # given - test_vars = { - 'flavor': {}, - 'resource_id': '' - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - nova_plugin.flavor.ctx = ctx - mock_flavor = self.MockFlavorOS(self.test_id, self.test_name) - nova_client = self.mock_nova_client(mock_flavor) - - # when (create) - nova_plugin.flavor.create(nova_client, {}) - - # then (create) - self.assertEqual( - self.test_name, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - FLAVOR_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - - # when (delete) - nova_plugin.flavor.delete(nova_client=nova_client) - - # then (delete) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_flavor_create_and_delete_with_extra_specs_and_tenants(self, *_): - # given - test_vars_tenant_id = 'some_tenant_id' - test_vars_tenants = [test_vars_tenant_id] - test_vars_extra_specs = { - 'key1': 'value1', - 'key2': 'value2' - } - test_vars = { - 'flavor': {}, - 'extra_specs': test_vars_extra_specs, - 'tenants': test_vars_tenants, - 'resource_id': '' - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - nova_plugin.flavor.ctx = ctx - mock_flavor = self.MockFlavorOS(self.test_id, self.test_name) - nova_client = self.mock_nova_client(mock_flavor) - - # when (create) - nova_plugin.flavor.create(nova_client, {}) - - # then (create) - mock_flavor.assert_set_keys_called_with(self, test_vars_extra_specs) - nova_client.flavor_access.add_tenant_access.assert_called_once_with( - mock_flavor, - test_vars_tenant_id - ) - - self.assertEqual( - self.test_name, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - FLAVOR_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - self.assertEqual( - test_vars_extra_specs, - ctx.instance.runtime_properties[EXTRA_SPECS_PROPERTY] - ) - self.assertEqual( - test_vars_tenants, - ctx.instance.runtime_properties[TENANTS_PROPERTY] - ) - - # when (delete) - nova_plugin.flavor.delete(nova_client=nova_client) - - # then (delete) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - EXTRA_SPECS_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - TENANTS_PROPERTY, - ctx.instance.runtime_properties - ) - - def test_list_flavors(self, *_): - # given - test_vars = { - 'flavor': {}, - 'resource_id': '' - } - - ctx = self.mock_ctx( - test_vars, - self.test_id, - self.test_deployment_id, - {OPENSTACK_ID_PROPERTY: self.test_id} - ) - mock_flavor = self.MockFlavorOS(self.test_id, self.test_name) - nova_client = self.mock_nova_client(mock_flavor) - nova_plugin.flavor.ctx = ctx - - # when - nova_plugin.flavor.list_flavors(args={}, nova_client=nova_client) - - # then - flavor_list = FLAVOR_OPENSTACK_TYPE + '_list' - 
self.assertIn(flavor_list, ctx.instance.runtime_properties) - self.assertEqual(1, len(ctx.instance.runtime_properties[flavor_list])) diff --git a/nova_plugin/tests/test_host_aggregate.py b/nova_plugin/tests/test_host_aggregate.py deleted file mode 100644 index 8482dd88..00000000 --- a/nova_plugin/tests/test_host_aggregate.py +++ /dev/null @@ -1,504 +0,0 @@ -import mock -import unittest - -from cloudify.context import NODE_INSTANCE - -from cloudify.mocks import ( - MockContext, - MockNodeInstanceContext, - MockNodeContext -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_RESOURCE_PROPERTY -) -from nova_plugin.host_aggregate import ( - HOST_AGGREGATE_OPENSTACK_TYPE, - HOSTS_PROPERTY -) -import nova_plugin - - -class TestHostAggregate(unittest.TestCase): - test_id = 'test-id' - test_name = 'test-name' - existing_test_id = 'existing-test-id' - existing_test_name = 'existing-test-name' - updated_name = 'updated-name' - test_deployment_id = 'test-deployment-id' - - class MockHostAggregateOS: - def __init__(self, id, name, hosts=None): - self._id = id - self._name = name - self._hosts = hosts - - @property - def id(self): - return self._id - - @property - def name(self): - return self._name - - @property - def hosts(self): - return self._hosts or [] - - def to_dict(self): - return {'name': self.name, 'id': self.id} - - def mock_nova_client(self, - mock_host_aggregate, - mocked_updated_host_aggregate=None): - nova_client = mock.MagicMock() - - nova_client.aggregates.create.return_value = mock_host_aggregate - nova_client.aggregates.get.return_value = mock_host_aggregate - nova_client.aggregates.list.return_value = [mock_host_aggregate] - nova_client.aggregates.find.return_value = mock.MagicMock( - id=self.test_name - ) - nova_client.aggregates.update.return_value = \ - mocked_updated_host_aggregate - - nova_client.aggregates.add_host = mock.MagicMock() - nova_client.aggregates.remove_host = mock.MagicMock() - nova_client.aggregates.set_metadata = mock.MagicMock() - - nova_client.get_id_from_resource.return_value = \ - self.existing_test_id - nova_client.get_name_from_resource.return_value = \ - self.existing_test_name - - return nova_client - - def mock_ctx(self, - test_vars, - test_id, - test_deployment_id, - runtime_properties=None): - - ctx = MockContext() - ctx.node = MockNodeContext(properties=test_vars) - ctx.instance = MockNodeInstanceContext( - id=test_id, - runtime_properties=runtime_properties or {} - ) - ctx.deployment = mock.Mock() - ctx.deployment.id = test_deployment_id - ctx.bootstrap_context = mock.Mock() - setattr(ctx.bootstrap_context, 'resources_prefix', '') - ctx.type = NODE_INSTANCE - ctx.logger = mock.Mock() - - return ctx - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_add_hosts(self, *_): - # given - test_vars_host1 = 'cf4301' - test_vars_host2 = 'openstack-kilo-t2.novalocal' - test_vars_host3 = 'openstack-kilo-t2.novalocal-2' - test_vars_hosts_initial = [test_vars_host1] - test_vars_hosts_to_add = [test_vars_host2, test_vars_host3] - test_vars_hosts_expected = [ - test_vars_host1, test_vars_host2, test_vars_host3 - ] - - test_vars = { - 'aggregate': { - 'name': self.test_name, - 'availability_zone': 'internal' - }, - 'hosts': test_vars_hosts_initial, - 'metadata': {}, - 'resource_id': '' - } - - ctx = self.mock_ctx( - test_vars, - self.test_id, - self.test_deployment_id, - { - OPENSTACK_ID_PROPERTY: self.test_id, - 
OPENSTACK_NAME_PROPERTY: self.test_name, - OPENSTACK_TYPE_PROPERTY: HOST_AGGREGATE_OPENSTACK_TYPE, - HOSTS_PROPERTY: test_vars_hosts_initial - } - ) - nova_plugin.host_aggregate.ctx = ctx - - mocked_host_aggregate = self.MockHostAggregateOS( - self.test_id, - self.test_name - ) - nova_client = self.mock_nova_client(mocked_host_aggregate) - - # when - nova_plugin.host_aggregate.add_hosts( - nova_client, - test_vars_hosts_to_add - ) - - # then - self.assertEqual( - set(test_vars_hosts_expected), - set(ctx.instance.runtime_properties[HOSTS_PROPERTY]) - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_remove_hosts(self, *_): - # given - test_vars_host1 = 'cf4301' - test_vars_host2 = 'openstack-kilo-t2.novalocal' - test_vars_host3 = 'openstack-kilo-t2.novalocal-2' - test_vars_hosts_initial = [ - test_vars_host1, test_vars_host2, test_vars_host3 - ] - test_vars_hosts_to_remove = [test_vars_host2, test_vars_host3] - test_vars_hosts_expected = [test_vars_host1] - - test_vars = { - 'aggregate': { - 'name': self.test_name, - 'availability_zone': 'internal' - }, - 'hosts': test_vars_hosts_initial, - 'metadata': {}, - 'resource_id': '' - } - - ctx = self.mock_ctx( - test_vars, - self.test_id, - self.test_deployment_id, - { - OPENSTACK_ID_PROPERTY: self.test_id, - OPENSTACK_NAME_PROPERTY: self.test_name, - OPENSTACK_TYPE_PROPERTY: HOST_AGGREGATE_OPENSTACK_TYPE, - HOSTS_PROPERTY: test_vars_hosts_initial - } - ) - nova_plugin.host_aggregate.ctx = ctx - - mocked_host_aggregate = self.MockHostAggregateOS( - self.test_id, - self.test_name - ) - nova_client = self.mock_nova_client(mocked_host_aggregate) - - # when - nova_plugin.host_aggregate.remove_hosts( - nova_client, test_vars_hosts_to_remove - ) - - # then - self.assertEqual( - set(test_vars_hosts_expected), - set(ctx.instance.runtime_properties[HOSTS_PROPERTY]) - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_create_and_delete(self, *_): - # given - test_vars_host1 = 'cf4301' - test_vars_host2 = 'openstack-kilo-t2.novalocal' - test_vars_hosts = [test_vars_host1, test_vars_host2] - test_vars_metadata = { - 'test': 'value1' - } - test_vars = { - 'aggregate': { - 'name': self.test_name, - 'availability_zone': 'internal' - }, - 'hosts': test_vars_hosts, - 'metadata': test_vars_metadata, - 'resource_id': '' - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - nova_plugin.host_aggregate.ctx = ctx - - mocked_host_aggregate = self.MockHostAggregateOS( - self.test_id, - self.test_name, - test_vars_hosts - ) - nova_client = self.mock_nova_client(mocked_host_aggregate) - - # when (create) - nova_plugin.host_aggregate.create(nova_client, {}) - - # then (create) - self.assertEqual( - self.test_name, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - HOST_AGGREGATE_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - self.assertEqual( - test_vars_hosts, - ctx.instance.runtime_properties[HOSTS_PROPERTY] - ) - nova_client.aggregates.add_host.assert_any_call( - mocked_host_aggregate, - test_vars_host1 - ) - nova_client.aggregates.add_host.assert_any_call( - mocked_host_aggregate, - test_vars_host2 - ) - nova_client.aggregates.set_metadata.assert_called_once_with( - mocked_host_aggregate, - test_vars_metadata - ) - - # when (delete) - 
nova_plugin.host_aggregate.delete(nova_client) - - # then (delete) - nova_client.aggregates.remove_host.assert_any_call( - self.test_id, - test_vars_host1 - ) - nova_client.aggregates.remove_host.assert_any_call( - self.test_id, - test_vars_host2 - ) - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - HOSTS_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - @mock.patch( - 'openstack_plugin_common.get_resource_by_name_or_id', - autospec=True, - return_value=MockHostAggregateOS( - existing_test_id, - existing_test_name - ) - ) - def test_create_and_delete_external_resource(self, *_): - # given - test_vars = { - 'aggregate': {}, - 'resource_id': self.existing_test_id, - 'use_external_resource': True - } - - ctx = self.mock_ctx(test_vars, self.test_id, self.test_deployment_id) - nova_plugin.host_aggregate.ctx = ctx - nova_client = self.mock_nova_client(None) - - # when (create) - nova_plugin.host_aggregate.create(nova_client, {}) - - # then (create) - self.assertEqual( - self.existing_test_name, - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.existing_test_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - HOST_AGGREGATE_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - self.assertTrue( - ctx.instance.runtime_properties[OPENSTACK_RESOURCE_PROPERTY] - ) - nova_client.aggregates.create.assert_not_called() - nova_client.aggregates.add_host.assert_not_called() - nova_client.aggregates.set_metadata.assert_not_called() - - # when (delete) - nova_plugin.host_aggregate.delete(nova_client) - - # then (delete) - nova_client.aggregates.remove_host.assert_not_called() - nova_client.aggregates.delete.assert_not_called() - - self.assertNotIn( - OPENSTACK_ID_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_NAME_PROPERTY, - ctx.instance.runtime_properties - ) - self.assertNotIn( - OPENSTACK_TYPE_PROPERTY, - ctx.instance.runtime_properties - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_update(self, *_): - # given - test_vars_hosts = ['cf4301', 'openstack-kilo-t2.novalocal'] - test_vars_metadata = { - 'test': 'value1' - } - test_vars = { - 'aggregate': { - 'name': self.test_name, - 'availability_zone': 'internal' - }, - 'hosts': test_vars_hosts, - 'metadata': test_vars_metadata, - 'resource_id': '' - } - - ctx = self.mock_ctx( - test_vars, - self.test_id, - self.test_deployment_id, - { - OPENSTACK_ID_PROPERTY: self.test_id, - OPENSTACK_NAME_PROPERTY: self.test_name, - OPENSTACK_TYPE_PROPERTY: HOST_AGGREGATE_OPENSTACK_TYPE, - HOSTS_PROPERTY: test_vars_hosts - } - ) - nova_plugin.host_aggregate.ctx = ctx - - mocked_host_aggregate = self.MockHostAggregateOS( - self.test_id, - self.test_name - ) - - mocked_updated_host_aggregate = \ - self.MockHostAggregateOS(self.test_id, self.updated_name) - nova_client = self.mock_nova_client( - mocked_host_aggregate, - mocked_updated_host_aggregate - ) - - # when - nova_plugin.host_aggregate.update( - nova_client, - {'aggregate': mocked_updated_host_aggregate.to_dict()} - ) - - # then - self.assertEqual( - self.updated_name, - 
ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - ) - self.assertEqual( - self.test_id, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - ) - self.assertEqual( - HOST_AGGREGATE_OPENSTACK_TYPE, - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - ) - self.assertEqual( - test_vars_hosts, - ctx.instance.runtime_properties[HOSTS_PROPERTY] - ) - nova_client.aggregates.update.assert_called_once_with( - self.test_id, - mocked_updated_host_aggregate.to_dict() - ) - nova_client.aggregates.set_metadata.assert_called_once_with( - self.test_id, - test_vars_metadata - ) - - @mock.patch( - 'openstack_plugin_common._handle_kw', - autospec=True, - return_value=None - ) - def test_list(self, *_): - # given - test_vars_host1 = 'cf4301' - test_vars_host2 = 'openstack-kilo-t2.novalocal' - test_vars_hosts = [test_vars_host1, test_vars_host2] - test_vars_metadata = { - 'test': 'value1' - } - test_vars = { - 'aggregate': { - 'name': self.test_name, - 'availability_zone': 'internal' - }, - 'hosts': test_vars_hosts, - 'metadata': test_vars_metadata, - 'resource_id': '' - } - - ctx = self.mock_ctx( - test_vars, - self.test_id, - self.test_deployment_id, - { - OPENSTACK_ID_PROPERTY: self.test_id, - OPENSTACK_NAME_PROPERTY: self.test_name, - OPENSTACK_TYPE_PROPERTY: HOST_AGGREGATE_OPENSTACK_TYPE, - HOSTS_PROPERTY: test_vars_hosts - } - ) - nova_plugin.host_aggregate.ctx = ctx - - mocked_host_aggregate = self.MockHostAggregateOS( - self.test_id, - self.test_name - ) - nova_client = self.mock_nova_client(mocked_host_aggregate) - - # when - nova_plugin.host_aggregate.list_host_aggregates(nova_client) - - # then - ha_list_key = '{}_list'.format(HOST_AGGREGATE_OPENSTACK_TYPE) - self.assertIn(ha_list_key, ctx.instance.runtime_properties) - self.assertEqual( - [{'name': self.test_name, 'id': self.test_id}], - ctx.instance.runtime_properties[ha_list_key] - ) diff --git a/nova_plugin/tests/test_relationships.py b/nova_plugin/tests/test_relationships.py deleted file mode 100644 index 2814057f..00000000 --- a/nova_plugin/tests/test_relationships.py +++ /dev/null @@ -1,228 +0,0 @@ -######### -# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -"""Test the functions related to retrieving relationship information. 
- -Functions under test are mostly inside openstack_plugin_common: -get_relationships_by_openstack_type -get_connected_nodes_by_openstack_type -get_openstack_ids_of_connected_nodes_by_openstack_type -get_single_connected_node_by_openstack_type -""" - -import uuid -from unittest import TestCase - -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE - -from cloudify.exceptions import NonRecoverableError - -from cloudify.mocks import ( - MockCloudifyContext, - MockNodeContext, - MockNodeInstanceContext, - MockRelationshipContext, - MockRelationshipSubjectContext, -) -from openstack_plugin_common import ( - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - get_openstack_id_of_single_connected_node_by_openstack_type, - get_openstack_ids_of_connected_nodes_by_openstack_type, - get_relationships_by_openstack_type, - get_single_connected_node_by_openstack_type, -) - - -class RelationshipsTestBase(TestCase): - def _make_vm_ctx_with_relationships(self, rel_specs, properties=None): - """Prepare a mock CloudifyContext from the given relationship spec. - - rel_specs is an ordered collection of relationship specs - dicts - with the keys "node" and "instance" used to construct the - MockNodeContext and the MockNodeInstanceContext, and optionally a - "type" key. - Examples: [ - {}, - {"node": {"id": 5}}, - { - "type": "some_type", - "instance": { - "id": 3, - "runtime_properties":{} - } - } - ] - """ - if properties is None: - properties = {} - relationships = [] - for rel_spec in rel_specs: - node = rel_spec.get('node', {}) - node_id = node.pop('id', uuid.uuid4().hex) - - instance = rel_spec.get('instance', {}) - instance_id = instance.pop('id', '{0}_{1}'.format( - node_id, uuid.uuid4().hex)) - if 'properties' not in node: - node['properties'] = {} - node_ctx = MockNodeContext(id=node_id, **node) - instance_ctx = MockNodeInstanceContext(id=instance_id, **instance) - - rel_subject_ctx = MockRelationshipSubjectContext( - node=node_ctx, instance=instance_ctx) - rel_type = rel_spec.get('type') - rel_ctx = MockRelationshipContext(target=rel_subject_ctx, - type=rel_type) - relationships.append(rel_ctx) - return MockCloudifyContext(node_id='vm', properties=properties, - relationships=relationships) - - -class TestGettingRelatedResources(RelationshipsTestBase): - - def test_get_relationships_finds_all_by_type(self): - """get_relationships_by_openstack_type returns all rels that match.""" - rel_specs = [{ - 'instance': { - 'id': instance_id, - 'runtime_properties': { - OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE - } - } - } for instance_id in range(3)] - - rel_specs.append({ - 'instance': { - 'runtime_properties': { - OPENSTACK_TYPE_PROPERTY: 'something else' - } - } - }) - - ctx = self._make_vm_ctx_with_relationships(rel_specs) - filtered = get_relationships_by_openstack_type(ctx, - NETWORK_OPENSTACK_TYPE) - self.assertEqual(3, len(filtered)) - - def test_get_ids_of_nodes_by_type(self): - - rel_spec = { - 'instance': { - 'runtime_properties': { - OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, - OPENSTACK_ID_PROPERTY: 'the node id' - } - } - } - ctx = self._make_vm_ctx_with_relationships([rel_spec]) - ids = get_openstack_ids_of_connected_nodes_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - self.assertEqual(['the node id'], ids) - - -class TestGetSingleByID(RelationshipsTestBase): - def _make_instances(self, ids): - """Mock a context with relationships to instances with given ids.""" - rel_specs = [{ - 'node': { - 'id': node_id - }, - 'instance': { - 'runtime_properties': { - 
OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, - OPENSTACK_ID_PROPERTY: node_id - } - } - } for node_id in ids] - return self._make_vm_ctx_with_relationships(rel_specs) - - def test_get_single_id(self): - ctx = self._make_instances(['the node id']) - found_id = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - self.assertEqual('the node id', found_id) - - def test_get_single_id_two_found(self): - ctx = self._make_instances([0, 1]) - self.assertRaises( - NonRecoverableError, - get_openstack_id_of_single_connected_node_by_openstack_type, ctx, - NETWORK_OPENSTACK_TYPE) - - def test_get_single_id_two_found_if_exists_true(self): - ctx = self._make_instances([0, 1]) - - try: - get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) - except NonRecoverableError as e: - self.assertIn(NETWORK_OPENSTACK_TYPE, e.message) - else: - self.fail() - - def test_get_single_id_if_exists_none_found(self): - ctx = self._make_instances([]) - found = get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) - self.assertIsNone(found) - - def test_get_single_id_none_found(self): - rel_spec = [] - ctx = self._make_vm_ctx_with_relationships(rel_spec) - self.assertRaises( - NonRecoverableError, - get_openstack_id_of_single_connected_node_by_openstack_type, - ctx, - NETWORK_OPENSTACK_TYPE) - - def test_get_single_node(self): - ctx = self._make_instances(['the node id']) - found_node = get_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE) - self.assertEqual('the node id', found_node.id) - - def test_get_single_node_two_found(self): - ctx = self._make_instances([0, 1]) - self.assertRaises( - NonRecoverableError, - get_single_connected_node_by_openstack_type, - ctx, NETWORK_OPENSTACK_TYPE) - - def test_get_single_node_two_found_if_exists(self): - ctx = self._make_instances([0, 1]) - - self.assertRaises( - NonRecoverableError, - get_single_connected_node_by_openstack_type, - ctx, - NETWORK_OPENSTACK_TYPE, - if_exists=True) - - def test_get_single_node_if_exists_none_found(self): - ctx = self._make_instances([]) - - found = get_single_connected_node_by_openstack_type( - ctx, NETWORK_OPENSTACK_TYPE, if_exists=True) - self.assertIsNone(found) - - def test_get_single_node_none_found(self): - ctx = self._make_instances([]) - - self.assertRaises( - NonRecoverableError, - get_single_connected_node_by_openstack_type, - ctx, - NETWORK_OPENSTACK_TYPE) diff --git a/nova_plugin/tests/test_server.py b/nova_plugin/tests/test_server.py deleted file mode 100644 index 77d3ef95..00000000 --- a/nova_plugin/tests/test_server.py +++ /dev/null @@ -1,1332 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
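The relationship-lookup helpers exercised by the tests above are normally called from operation code roughly as follows. This is a usage sketch only: `NETWORK_OPENSTACK_TYPE` comes from the imports shown above, and the surrounding operation/decorators are omitted.

```python
from neutron_plugin.network import NETWORK_OPENSTACK_TYPE
from openstack_plugin_common import (
    get_openstack_ids_of_connected_nodes_by_openstack_type,
    get_openstack_id_of_single_connected_node_by_openstack_type,
)


def connected_network_info(ctx):
    # IDs of every related node instance whose openstack type is 'network'.
    network_ids = get_openstack_ids_of_connected_nodes_by_openstack_type(
        ctx, NETWORK_OPENSTACK_TYPE)
    # At most one such node is expected here; with if_exists=True a missing
    # relationship yields None instead of raising NonRecoverableError.
    single_id = get_openstack_id_of_single_connected_node_by_openstack_type(
        ctx, NETWORK_OPENSTACK_TYPE, if_exists=True)
    return network_ids, single_id
```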
- -import collections -from os import path -import tempfile - -import unittest -import mock - -import nova_plugin -from cloudify.test_utils import workflow_test - -from openstack_plugin_common import NeutronClientWithSugar, \ - OPENSTACK_TYPE_PROPERTY, OPENSTACK_ID_PROPERTY, OPENSTACK_NAME_PROPERTY -from neutron_plugin.network import NETWORK_OPENSTACK_TYPE -from neutron_plugin.port import PORT_OPENSTACK_TYPE -from nova_plugin.tests.test_relationships import RelationshipsTestBase -from nova_plugin.server import _prepare_server_nics -from novaclient import exceptions as nova_exceptions -from cinder_plugin.volume import VOLUME_OPENSTACK_TYPE, VOLUME_BOOTABLE -from cloudify.exceptions import NonRecoverableError, RecoverableError -from cloudify.state import current_ctx - -from cloudify.utils import setup_logger - -from cloudify.mocks import ( - MockNodeContext, - MockContext, - MockCloudifyContext, - MockNodeInstanceContext, - MockRelationshipContext, - MockRelationshipSubjectContext -) - - -class TestServer(unittest.TestCase): - - blueprint_path = path.join('resources', - 'test-start-operation-retry-blueprint.yaml') - - @mock.patch('nova_plugin.server.create') - @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') - @workflow_test(blueprint_path, copy_plugin_yaml=True) - def test_nova_server_lifecycle_start(self, cfy_local, *_): - - test_vars = { - 'counter': 0, - 'server': mock.MagicMock() - } - - def mock_get_server_by_context(*_): - s = test_vars['server'] - if test_vars['counter'] == 0: - s.status = nova_plugin.server.SERVER_STATUS_BUILD - else: - s.status = nova_plugin.server.SERVER_STATUS_ACTIVE - test_vars['counter'] += 1 - return s - - with mock.patch('nova_plugin.server.get_server_by_context', - new=mock_get_server_by_context): - cfy_local.execute('install', task_retries=3) - - self.assertEqual(2, test_vars['counter']) - self.assertEqual(0, test_vars['server'].start.call_count) - - @workflow_test(blueprint_path, copy_plugin_yaml=True) - @mock.patch('nova_plugin.server.create') - @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') - def test_nova_server_lifecycle_start_after_stop(self, cfy_local, *_): - - test_vars = { - 'counter': 0, - 'server': mock.MagicMock() - } - - def mock_get_server_by_context(_): - s = test_vars['server'] - if test_vars['counter'] == 0: - s.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - elif test_vars['counter'] == 1: - setattr(s, - nova_plugin.server.OS_EXT_STS_TASK_STATE, - nova_plugin.server.SERVER_TASK_STATE_POWERING_ON) - else: - s.status = nova_plugin.server.SERVER_STATUS_ACTIVE - test_vars['counter'] += 1 - test_vars['server'] = s - return s - - with mock.patch('nova_plugin.server.get_server_by_context', - new=mock_get_server_by_context): - cfy_local.execute('install', task_retries=3) - - self.assertEqual(1, test_vars['server'].start.call_count) - self.assertEqual(3, test_vars['counter']) - - @workflow_test(blueprint_path, copy_plugin_yaml=True) - @mock.patch('nova_plugin.server.create') - @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') - def test_nova_server_lifecycle_start_unknown_status(self, cfy_local, *_): - test_vars = { - 'counter': 0, - 'server': mock.MagicMock() - } - - def mock_get_server_by_context(_): - s = test_vars['server'] - if test_vars['counter'] == 0: - s.status = '### unknown-status ###' - test_vars['counter'] += 1 - test_vars['server'] = s - return s - - with mock.patch('nova_plugin.server.get_server_by_context', - new=mock_get_server_by_context): - 
self.assertRaisesRegexp(RuntimeError, - 'Unexpected server state', - cfy_local.execute, - 'install') - - self.assertEqual(0, test_vars['server'].start.call_count) - self.assertEqual(1, test_vars['counter']) - - @workflow_test(blueprint_path, copy_plugin_yaml=True) - @mock.patch('nova_plugin.server.start') - @mock.patch('nova_plugin.server._handle_image_or_flavor') - @mock.patch('nova_plugin.server._fail_on_missing_required_parameters') - @mock.patch('openstack_plugin_common.nova_client') - def test_nova_server_creation_param_integrity( - self, cfy_local, mock_nova, *args): - cfy_local.execute('install', task_retries=0) - calls = mock_nova.Client.return_value.servers.method_calls - self.assertEqual(1, len(calls)) - kws = calls[0][2] - self.assertIn('scheduler_hints', kws) - self.assertEqual(kws['scheduler_hints'], - {'group': 'affinity-group-id'}, - 'expecting \'scheduler_hints\' value to exist') - - @workflow_test(blueprint_path, copy_plugin_yaml=True, - inputs={'use_password': True}) - @mock.patch('nova_plugin.server.create') - @mock.patch('nova_plugin.server._set_network_and_ip_runtime_properties') - @mock.patch( - 'nova_plugin.server.get_single_connected_node_by_openstack_type', - autospec=True, return_value=None) - def test_nova_server_with_use_password(self, cfy_local, *_): - - test_vars = { - 'counter': 0, - 'server': mock.MagicMock() - } - - tmp_path = tempfile.NamedTemporaryFile(prefix='key_name') - key_path = tmp_path.name - - def mock_get_server_by_context(_): - s = test_vars['server'] - if test_vars['counter'] == 0: - s.status = nova_plugin.server.SERVER_STATUS_BUILD - else: - s.status = nova_plugin.server.SERVER_STATUS_ACTIVE - test_vars['counter'] += 1 - - def check_agent_key_path(private_key): - self.assertEqual(private_key, key_path) - return private_key - - s.get_password = check_agent_key_path - return s - - with mock.patch('nova_plugin.server.get_server_by_context', - mock_get_server_by_context): - with mock.patch( - 'cloudify.context.BootstrapContext.' 
- 'CloudifyAgent.agent_key_path', - new_callable=mock.PropertyMock, return_value=key_path): - cfy_local.execute('install', task_retries=5) - - def tearDown(self): - current_ctx.clear() - - def _prepare_mocks(self, nova_instance): - nova_instance.servers.stop = mock.Mock() - nova_instance.servers.start = mock.Mock() - nova_instance.servers.resume = mock.Mock() - nova_instance.servers.suspend = mock.Mock() - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_stop(self, nova_m): - nova_instance = nova_m.return_value - - # use external resource - server_ctx = MockCloudifyContext( - node_id="node_id", - node_name="node_name", - properties={'use_external_resource': True}, - runtime_properties={} - ) - current_ctx.set(server_ctx) - nova_plugin.server.stop(ctx=server_ctx) - - # use internal already stoped vm - server_ctx = self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - nova_instance.servers.get = mock.Mock(return_value=server_mock) - nova_plugin.server.stop(ctx=server_ctx) - - # use internal slow stop - server_ctx = self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - self._prepare_mocks(nova_instance) - - nova_plugin.server.stop(ctx=server_ctx) - - nova_instance.servers.stop.assert_has_calls([mock.call(server_mock)]) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_server_stop(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already stoped vm - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - nova_plugin.server._server_stop(nova_instance, server_mock) - - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - - # use internal slow stop - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_stop(nova_instance, server_mock) - - nova_instance.servers.stop.assert_has_calls([mock.call(server_mock)]) - nova_instance.servers.start.assert_not_called() - - # stop on first call - self._simplectx() - self.func_called = False - - def _server_get(server_id): - server_mock = mock.Mock() - - if not self.func_called: - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - self.func_called = True - else: - server_mock.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - return server_mock - - nova_instance.servers.get = _server_get - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_stop(nova_instance, server_mock) - - nova_instance.servers.stop.assert_has_calls([mock.call(server_mock)]) - nova_instance.servers.start.assert_not_called() - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_server_start(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already started vm - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - nova_plugin.server._server_start(nova_instance, server_mock) - - 
nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - - # use internal slow start - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - nova_instance.servers.get = mock.Mock(return_value=server_mock) - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_start(nova_instance, server_mock) - - nova_instance.servers.start.assert_has_calls([mock.call(server_mock)]) - nova_instance.servers.stop.assert_not_called() - - # start on first call - self._simplectx() - self.func_called = False - - def _server_get(server_id): - server_mock = mock.Mock() - - if not self.func_called: - server_mock.status = nova_plugin.server.SERVER_STATUS_SHUTOFF - self.func_called = True - else: - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - return server_mock - - nova_instance.servers.get = _server_get - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_start(nova_instance, server_mock) - - nova_instance.servers.start.assert_has_calls([mock.call(server_mock)]) - nova_instance.servers.stop.assert_not_called() - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_server_reboot(self, nova_m): - ctx_operation = { - 'retry_number': 0 - } - nova_instance = nova_m.return_value - - # use internal already started vm - self._simplectx(operation=ctx_operation) - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - nova_plugin.server.reboot(reboot_type='soft') - - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - nova_instance.servers.reboot.assert_has_calls( - [mock.call(server_mock, 'SOFT')]) - - # use internal already started vm - self._simplectx(operation=ctx_operation) - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - nova_plugin.server.reboot(reboot_type='hard') - - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - nova_instance.servers.reboot.assert_has_calls( - [mock.call(server_mock, 'HARD')]) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_server_resume(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already resumed vm - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=None) - - nova_plugin.server._server_resume(nova_instance, server_mock) - - nova_instance.servers.get.assert_not_called() - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - - # use internal run resume - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SUSPENDED - nova_instance.servers.get = mock.Mock(return_value=server_mock) - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_resume(nova_instance, server_mock) - - nova_instance.servers.resume.assert_has_calls([mock.call(server_mock)]) - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.get.assert_not_called() - nova_instance.servers.start.assert_not_called() - nova_instance.servers.stop.assert_not_called() - - 
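# The stop/start/resume/suspend tests above and below all exercise the same
# pattern in the old nova_plugin.server helpers: skip the API call when the
# server is already in the desired status, otherwise issue the action and
# poll ``servers.get`` until the status flips. A minimal sketch of that
# pattern, assuming a novaclient-style client; the helper names, timeout and
# interval here are illustrative, not the plugin's actual implementation:

import time


def _wait_for_server_status(nova, server, target_status,
                            timeout=300, interval=5):
    # Poll the Nova API until the server reports ``target_status``.
    deadline = time.time() + timeout
    current = nova.servers.get(server.id)
    while current.status != target_status:
        if time.time() > deadline:
            raise RuntimeError(
                'Server {0} did not reach status {1} within {2}s'.format(
                    server.id, target_status, timeout))
        time.sleep(interval)
        current = nova.servers.get(server.id)
    return current


def _stop_server_if_needed(nova, server):
    # Mirrors what test_stop/test_server_stop assert: no ``servers.stop``
    # call when the server is already SHUTOFF, a single call followed by
    # polling otherwise.
    if nova.servers.get(server.id).status != 'SHUTOFF':
        nova.servers.stop(server)
        _wait_for_server_status(nova, server, 'SHUTOFF')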
@mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_server_suspend(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already resumed vm - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SUSPENDED - nova_instance.servers.get = mock.Mock(return_value=None) - - nova_plugin.server._server_suspend(nova_instance, server_mock) - - nova_instance.servers.get.assert_not_called() - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.start.assert_not_called() - - # use internal run resume - self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - self._prepare_mocks(nova_instance) - - nova_plugin.server._server_suspend(nova_instance, server_mock) - - nova_instance.servers.suspend.assert_has_calls( - [mock.call(server_mock)]) - nova_instance.servers.resume.assert_not_called() - nova_instance.servers.stop.assert_not_called() - nova_instance.servers.get.assert_not_called() - nova_instance.servers.start.assert_not_called() - nova_instance.servers.stop.assert_not_called() - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_freeze_suspend(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already suspended vm - server_ctx = self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_SUSPENDED - nova_instance.servers.get = mock.Mock(return_value=server_mock) - nova_plugin.server.freeze_suspend(ctx=server_ctx) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_freeze_resume(self, nova_m): - nova_instance = nova_m.return_value - - # use internal already resumed vm - server_ctx = self._simplectx() - server_mock = mock.Mock() - server_mock.status = nova_plugin.server.SERVER_STATUS_ACTIVE - nova_instance.servers.get = mock.Mock(return_value=server_mock) - nova_plugin.server.freeze_resume(ctx=server_ctx) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_check_finished_upload(self, nova_m): - nova_instance = nova_m.return_value - - # ready for actions - self._simplectx() - server_mock = mock.Mock() - server_mock.id = 'server_id' - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'ready') - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - nova_plugin.server._check_finished_upload(nova_instance, server_mock, - ['image_uploading']) - - # still uploading - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'image_uploading') - - nova_plugin.server._check_finished_upload(nova_instance, server_mock, - ['image_uploading']) - - def _simplectx(self, operation=None): - server_ctx = MockCloudifyContext( - deployment_id='deployment_id', - node_id="node_id", - node_name="node_name", - properties={}, - operation=operation, - runtime_properties={'external_id': 'server_id'} - ) - current_ctx.set(server_ctx) - return server_ctx - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.GlanceClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_snapshot_create(self, glance_m, nova_m): - nova_instance = nova_m.return_value - glance_instance = glance_m.return_value - - server_ctx = self._simplectx() - - # 
snapshot - server_mock = mock.Mock() - server_mock.backup = mock.Mock() - server_mock.create_image = mock.Mock() - server_mock.id = 'server_id' - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'ready') - nova_instance.servers.get = mock.Mock(return_value=server_mock) - glance_instance.images.list = mock.Mock(return_value=[]) - - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - nova_plugin.server.snapshot_create(ctx=server_ctx, - snapshot_name='snapshot_name', - snapshot_rotation=10, - snapshot_incremental=True, - snapshot_type='week') - - nova_instance.servers.get.assert_has_calls( - [mock.call('server_id')] * 3) - server_mock.create_image.assert_called_once_with( - 'vm-server_id-snapshot_name-increment') - server_mock.backup.assert_not_called() - - # backup - server_mock = mock.Mock() - server_mock.backup = mock.Mock() - server_mock.create_image = mock.Mock() - server_mock.id = 'server_id' - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'ready') - nova_instance.servers.get = mock.Mock(return_value=server_mock) - glance_instance.images.list = mock.Mock(return_value=[]) - - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - nova_plugin.server.snapshot_create(ctx=server_ctx, - snapshot_name='snapshot_name', - snapshot_rotation=10, - snapshot_incremental=False, - snapshot_type='week') - - nova_instance.servers.get.assert_has_calls( - [mock.call('server_id')] * 3) - server_mock.create_image.assert_not_called() - server_mock.backup.assert_called_once_with( - 'vm-server_id-snapshot_name-backup', 'week', 10) - glance_instance.images.list.assert_called_once_with(filters={ - 'name': 'vm-server_id-snapshot_name-backup'}) - - # we already has such backup - glance_instance.images.list = mock.Mock(return_value=[{ - 'name': 'others', - 'image_type': 'raw', - 'id': 'a', - 'status': 'active' - }, { - 'name': 'vm-server_id-snapshot_name-backup', - 'image_type': 'raw', - 'id': 'b', - 'status': 'active' - }, { - 'name': 'vm-server_id-snapshot_name-increment', - 'image_type': 'snapshot', - 'id': 'c', - 'status': 'active' - }, { - 'name': 'vm-server_id-snapshot_name-backup', - 'image_type': 'backup', - 'id': 'd', - 'status': 'active' - }]) - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - with self.assertRaisesRegexp( - NonRecoverableError, - "Snapshot vm-server_id-snapshot_name-backup already exists." 
- ): - nova_plugin.server.snapshot_create( - ctx=server_ctx, snapshot_name='snapshot_name', - snapshot_rotation=10, snapshot_incremental=False, - snapshot_type='week') - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.GlanceClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_snapshot_apply(self, glance_m, nova_m): - nova_instance = nova_m.return_value - glance_instance = glance_m.return_value - - server_ctx = self._simplectx() - - # snapshot - server_mock = mock.Mock() - server_mock.rebuild = mock.Mock() - server_mock.id = 'server_id' - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'ready') - nova_instance.servers.get = mock.Mock(return_value=server_mock) - glance_instance.images.list = mock.Mock(return_value=[ - { - 'name': 'vm-server_id-snapshot_name-increment', - 'image_type': 'snapshot', - 'id': 'abc', - 'status': 'active' - } - ]) - - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - nova_plugin.server.snapshot_apply(ctx=server_ctx, - snapshot_name='snapshot_name', - snapshot_incremental=True) - - nova_instance.servers.get.assert_has_calls( - [mock.call('server_id')] * 3) - server_mock.rebuild.assert_called_once_with("abc") - - # backup - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - with self.assertRaisesRegexp( - NonRecoverableError, - 'No snapshots found with name: vm-server_id-snapshot_name.' - ): - nova_plugin.server.snapshot_apply( - ctx=server_ctx, snapshot_name='snapshot_name', - snapshot_incremental=False) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_image_delete(self, glance_m): - glance_instance = glance_m.return_value - server_ctx = self._simplectx() - - # still alive - glance_instance.images.list = mock.Mock(return_value=[ - { - 'name': 'vm-server_id-snapshot_name-increment', - 'image_type': 'snapshot', - 'id': 'abc', - 'status': 'active' - } - ]) - - server_ctx.operation.retry = mock.Mock( - side_effect=RecoverableError()) - with self.assertRaises(RecoverableError): - nova_plugin.server._image_delete( - glance_instance, - snapshot_name='vm-server_id-snapshot_name-increment', - snapshot_incremental=True) - server_ctx.operation.retry.assert_called_with( - message='abc is still alive', retry_after=30) - - # removed - glance_instance.images.list = mock.Mock(return_value=[]) - - nova_plugin.server._image_delete( - glance_instance, snapshot_name='snapshot_name', - snapshot_incremental=True) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.GlanceClientWithSugar') - @mock.patch('time.sleep', mock.Mock()) - def test_snapshot_delete(self, glance_m, nova_m): - nova_instance = nova_m.return_value - glance_instance = glance_m.return_value - - server_ctx = self._simplectx() - - # snapshot - server_mock = mock.Mock() - server_mock.id = 'server_id' - setattr(server_mock, nova_plugin.server.OS_EXT_STS_TASK_STATE, - 'ready') - nova_instance.servers.get = mock.Mock(return_value=server_mock) - glance_instance.images.delete = mock.Mock() - glance_instance.images.list = mock.Mock(return_value=[ - { - 'name': 'vm-server_id-snapshot_name-increment', - 'image_type': 'snapshot', - 'id': 'abc', - 'status': 'active' - } - ]) - - server_ctx.operation.retry = mock.Mock( - side_effect=RecoverableError('still alive')) - with mock.patch('openstack_plugin_common._find_context_in_kw', - 
mock.Mock(return_value=server_ctx)): - with self.assertRaisesRegexp( - RecoverableError, - 'still alive' - ): - nova_plugin.server.snapshot_delete( - ctx=server_ctx, snapshot_name='snapshot_name', - snapshot_incremental=True) - server_ctx.operation.retry.assert_called_with( - message='abc is still alive', retry_after=30) - - glance_instance.images.list.assert_has_calls([ - mock.call(filters={"name": "vm-server_id-snapshot_name-increment"}) - ]) - glance_instance.images.delete.assert_called_once_with("abc") - - # backup, if image does not exist - ignore - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=server_ctx)): - nova_plugin.server.snapshot_delete( - ctx=server_ctx, snapshot_name='snapshot_name', - snapshot_incremental=False) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - def test_list_servers(self, nova_m): - nova_instance = nova_m.return_value - server_ctx = self._simplectx() - - nova_instance.servers.list = mock.Mock(return_value=[]) - - nova_plugin.server.list_servers(ctx=server_ctx, args={"abc": "def"}) - - nova_instance.servers.list.assert_called_once_with(abc="def") - self.assertEqual( - {'external_id': 'server_id', 'server_list': []}, - server_ctx.instance.runtime_properties - ) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - def test_wait_for_server_to_be_deleted(self, nova_m): - nova_instance = nova_m.return_value - self._simplectx() - # removed - nova_instance.servers.get = mock.Mock( - side_effect=nova_exceptions.NotFound("abc")) - nova_plugin.server._wait_for_server_to_be_deleted( - nova_instance, "unknown") - nova_instance.servers.get.assert_called_once_with("unknown") - - # still have - server_mock = mock.Mock() - server_mock.id = "a" - server_mock.status = "b" - nova_instance.servers.get = mock.Mock(return_value=server_mock) - - fake_time_values = [480, 240, 120, 60, 0] - - def fake_time(*_): - return fake_time_values.pop() - - with mock.patch('time.time', fake_time): - with self.assertRaisesRegexp( - RuntimeError, - 'Server unknown has not been deleted. 
waited for 120 seconds' - ): - nova_plugin.server._wait_for_server_to_be_deleted( - nova_instance, "unknown") - - def test_get_properties_by_node_instance_id(self): - # local run - ctx = self._simplectx() - ctx._local = True - mock_instance = mock.Mock() - mock_instance.node_id = 'node_id' - mock_node = mock.Mock() - mock_node.properties = {'a': 'b'} - ctx._endpoint.get_node_instance = mock.Mock(return_value=mock_instance) - ctx._endpoint.get_node = mock.Mock(return_value=mock_node) - self.assertEqual( - nova_plugin.server._get_properties_by_node_instance_id('abc'), - {'a': 'b'} - ) - ctx._endpoint.get_node_instance.assert_called_once_with('abc') - ctx._endpoint.get_node.assert_called_once_with('node_id') - # manager run - ctx = self._simplectx() - ctx._local = False - fake_client = mock.Mock() - fake_client.node_instances.get = mock.Mock(return_value=mock_instance) - fake_client.nodes.get = mock.Mock(return_value=mock_node) - with mock.patch( - 'nova_plugin.server.get_rest_client', - mock.Mock(return_value=fake_client) - ): - self.assertEqual( - nova_plugin.server._get_properties_by_node_instance_id('abc'), - {'a': 'b'} - ) - fake_client.node_instances.get.assert_called_once_with('abc') - fake_client.nodes.get.assert_called_once_with('deployment_id', - 'node_id') - - def test_validate_external_server_nics(self): - self._simplectx() - external_server = mock.Mock() - external_server.human_id = '_server' - - attached_interface = mock.Mock() - attached_interface.net_id = 'net1' - attached_interface.port_id = 'port1' - - external_server.interface_list = mock.Mock( - return_value=[attached_interface]) - external_server.interface_attach = mock.Mock() - - # Check that we fail if have alredy attached ports - with self.assertRaises(NonRecoverableError) as error: - nova_plugin.server._validate_external_server_nics( - external_server, ['net1'], ['port1']) - - external_server.interface_attach.assert_not_called() - self.assertEqual( - str(error.exception), - "Several ports/networks already connected to external server " - "_server: Networks - ['net1']; Ports - ['port1']" - ) - - # no attached ports from list - nova_plugin.server._validate_external_server_nics( - external_server, ['net2'], ['port2']) - external_server.interface_attach.assert_has_calls([ - mock.call(port_id='port2', net_id=None, fixed_ip=None), - mock.call(port_id=None, net_id='net2', fixed_ip=None) - ]) - - # net attached with port - new_interface = mock.Mock() - new_interface.net_id = 'net2' - new_interface.port_id = 'port2' - results = [ - [new_interface, attached_interface], - [attached_interface] - ] - - def _interface_list(): - return results.pop() - - external_server.interface_attach = mock.Mock() - external_server.interface_list = _interface_list - nova_plugin.server._validate_external_server_nics( - external_server, ['net2'], ['port2']) - external_server.interface_attach.assert_has_calls([ - mock.call(port_id='port2', net_id=None, fixed_ip=None) - ]) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - @mock.patch('openstack_plugin_common.NeutronClientWithSugar') - def test_external_server_create(self, _neutron_m, _nova_m): - ctx = self._simplectx() - - external_server = mock.Mock() - external_server.human_id = '_server' - external_server.metadata = {} - external_server.networks = {'abc': ['127.0.0.1']} - external_server.accessIPv4 = True - external_server.accessIPv6 = False - external_server.interface_list = mock.Mock(return_value=[]) - - with mock.patch( - 'nova_plugin.server.' 
- 'get_openstack_ids_of_connected_nodes_by_openstack_type', - mock.Mock(return_value=[]) - ): - with mock.patch( - 'nova_plugin.server.' - 'get_openstack_id_of_single_connected_node_by_openstack_type', - mock.Mock(return_value=None) - ): - with mock.patch('openstack_plugin_common._find_context_in_kw', - mock.Mock(return_value=ctx)): - with mock.patch( - 'nova_plugin.server.use_external_resource', - mock.Mock(return_value=external_server) - ): - nova_plugin.server.create(args=[]) - - -class TestMergeNICs(unittest.TestCase): - def test_no_management_network(self): - mgmt_network_id = None - nics = [{'net-id': 'other network'}] - - merged = nova_plugin.server._merge_nics(mgmt_network_id, nics) - - self.assertEqual(len(merged), 1) - self.assertEqual(merged[0]['net-id'], 'other network') - - def test_merge_prepends_management_network(self): - """When the mgmt network isnt in a relationship, its the 1st nic.""" - mgmt_network_id = 'management network' - nics = [{'net-id': 'other network'}] - - merged = nova_plugin.server._merge_nics(mgmt_network_id, nics) - - self.assertEqual(len(merged), 2) - self.assertEqual(merged[0]['net-id'], 'management network') - - def test_management_network_in_relationships(self): - """When the mgmt network was in a relationship, it's not prepended.""" - mgmt_network_id = 'management network' - nics = [{'net-id': 'other network'}, {'net-id': 'management network'}] - - merged = nova_plugin.server._merge_nics(mgmt_network_id, nics) - - self.assertEqual(nics, merged) - - -class TestNormalizeNICs(unittest.TestCase): - def test_normalize_port_priority(self): - """Whe there's both net-id and port-id, port-id is used.""" - nics = [{'net-id': '1'}, {'port-id': '2'}, {'net-id': 3, 'port-id': 4}] - normalized = nova_plugin.server._normalize_nics(nics) - expected = [{'net-id': '1'}, {'port-id': '2'}, {'port-id': 4}] - self.assertEqual(expected, normalized) - - -class MockNeutronClient(NeutronClientWithSugar): - """A fake neutron client with hard-coded test data.""" - - @mock.patch('openstack_plugin_common.OpenStackClient.__init__', - new=mock.Mock()) - def __init__(self): - super(MockNeutronClient, self).__init__() - - @staticmethod - def _search_filter(objs, search_params): - """Mock neutron's filtering by attributes in list_* methods. - - list_* methods (list_networks, list_ports) - """ - def _matches(obj, search_params): - return all(obj[k] == v for k, v in search_params.items()) - return [obj for obj in objs if _matches(obj, search_params)] - - def list_networks(self, **search_params): - networks = [ - {'name': 'network1', 'id': '1'}, - {'name': 'network2', 'id': '2'}, - {'name': 'network3', 'id': '3'}, - {'name': 'network4', 'id': '4'}, - {'name': 'network5', 'id': '5'}, - {'name': 'network6', 'id': '6'}, - {'name': 'other', 'id': 'other'} - ] - return {'networks': self._search_filter(networks, search_params)} - - def list_ports(self, **search_params): - ports = [ - {'name': 'port1', 'id': '1', 'network_id': '1'}, - {'name': 'port2', 'id': '2', 'network_id': '1'}, - {'name': 'port3', 'id': '3', 'network_id': '2'}, - {'name': 'port4', 'id': '4', 'network_id': '2'}, - ] - return {'ports': self._search_filter(ports, search_params)} - - def show_port(self, port_id): - ports = self.list_ports(id=port_id) - return {'port': ports['ports'][0]} - - -class NICTestBase(RelationshipsTestBase): - """Base test class for the NICs tests. - - It comes with helper methods to create a mock cloudify context, with - the specified relationships. 
- """ - mock_neutron = MockNeutronClient() - - def _relationship_spec(self, obj, objtype): - return {'node': {'properties': obj}, - 'instance': { - 'runtime_properties': {OPENSTACK_TYPE_PROPERTY: objtype, - OPENSTACK_ID_PROPERTY: obj['id']}}} - - def _make_vm_ctx_with_ports(self, management_network_name, ports): - port_specs = [self._relationship_spec(obj, PORT_OPENSTACK_TYPE) - for obj in ports] - vm_properties = {'management_network_name': management_network_name} - return self._make_vm_ctx_with_relationships(port_specs, - vm_properties) - - def _make_vm_ctx_with_networks(self, management_network_name, networks): - network_specs = [self._relationship_spec(obj, NETWORK_OPENSTACK_TYPE) - for obj in networks] - vm_properties = {'management_network_name': management_network_name} - return self._make_vm_ctx_with_relationships(network_specs, - vm_properties) - - -class TestServerNICs(NICTestBase): - """Test preparing the NICs list from server<->network relationships. - - Each test creates a cloudify context that represents a openstack VM - with relationships to networks. Then, examine the NICs list produced from - the relationships. - """ - def test_nova_server_creation_nics_ordering(self): - """NIC list keeps the order of the relationships. - - The nics= list passed to nova.server.create should be ordered - depending on the relationships to the networks (as defined in the - blueprint). - """ - ctx = self._make_vm_ctx_with_networks( - management_network_name='network1', - networks=[ - {'id': '1'}, - {'id': '2'}, - {'id': '3'}, - {'id': '4'}, - {'id': '5'}, - {'id': '6'}, - ]) - server = {'meta': {}} - - _prepare_server_nics( - self.mock_neutron, ctx, server) - - self.assertEqual( - ['1', '2', '3', '4', '5', '6'], - [n['net-id'] for n in server['nics']]) - - def test_server_creation_prepends_mgmt_network(self): - """If the management network isn't in a relation, it's the first NIC. - - Creating the server examines the relationships, and if it doesn't find - a relationship to the management network, it adds the management - network to the NICs list, as the first element. - """ - ctx = self._make_vm_ctx_with_networks( - management_network_name='other', - networks=[ - {'id': '1'}, - {'id': '2'}, - {'id': '3'}, - {'id': '4'}, - {'id': '5'}, - {'id': '6'}, - ]) - server = {'meta': {}} - - _prepare_server_nics( - self.mock_neutron, ctx, server) - - first_nic = server['nics'][0] - self.assertEqual('other', first_nic['net-id']) - self.assertEqual(7, len(server['nics'])) - - def test_server_creation_uses_relation_mgmt_nic(self): - """If the management network is in a relation, it isn't prepended. - - If the server has a relationship to the management network, - a new NIC isn't prepended to the list. - """ - ctx = self._make_vm_ctx_with_networks( - management_network_name='network1', - networks=[ - {'id': '1'}, - {'id': '2'}, - {'id': '3'}, - {'id': '4'}, - {'id': '5'}, - {'id': '6'}, - ]) - server = {'meta': {}} - - _prepare_server_nics( - self.mock_neutron, ctx, server) - self.assertEqual(6, len(server['nics'])) - - -class TestServerPortNICs(NICTestBase): - """Test preparing the NICs list from server<->port relationships. - - Create a cloudify ctx representing a vm with relationships to - openstack ports. Then examine the resulting NICs list: check that it - contains the networks that the ports were connected to, and that each - connection uses the port that was provided. - """ - - def test_network_with_port(self): - """Port on the management network is used to connect to it. 
- - The NICs list entry for the management network contains the - port-id of the port from the relationship, but doesn't contain net-id. - """ - ports = [{'id': '1'}] - ctx = self._make_vm_ctx_with_ports('network1', ports) - server = {'meta': {}} - - _prepare_server_nics( - self.mock_neutron, ctx, server) - - self.assertEqual([{'port-id': '1'}], server['nics']) - - def test_port_not_to_mgmt_network(self): - """A NICs list entry is added with the network and the port. - - A relationship to a port must not only add a NIC, but the NIC must - also make sure to use that port. - """ - ports = [{'id': '1'}] - ctx = self._make_vm_ctx_with_ports('other', ports) - server = {'meta': {}} - - _prepare_server_nics( - self.mock_neutron, ctx, server) - expected = [ - {'net-id': 'other'}, - {'port-id': '1'} - ] - self.assertEqual(expected, server['nics']) - - -class TestBootFromVolume(unittest.TestCase): - - @mock.patch('nova_plugin.server._get_boot_volume_relationships', - autospec=True) - def test_handle_boot_volume(self, mock_get_rels): - mock_get_rels.return_value.runtime_properties = { - 'external_id': 'test-id', - 'availability_zone': 'test-az', - } - server = {} - ctx = mock.MagicMock() - nova_plugin.server._handle_boot_volume(server, ctx) - self.assertEqual({'vda': 'test-id:::0'}, - server['block_device_mapping']) - self.assertEqual('test-az', - server['availability_zone']) - - @mock.patch('nova_plugin.server._get_boot_volume_relationships', - autospec=True, return_value=[]) - def test_handle_boot_volume_no_boot_volume(self, *_): - server = {} - ctx = mock.MagicMock() - nova_plugin.server._handle_boot_volume(server, ctx) - self.assertNotIn('block_device_mapping', server) - - -class TestImageFromRelationships(unittest.TestCase): - - @mock.patch('glance_plugin.image.' - 'get_openstack_ids_of_connected_nodes_by_openstack_type', - autospec=True, return_value=['test-id']) - def test_handle_boot_image(self, *_): - server = {} - ctx = mock.MagicMock() - nova_plugin.server.handle_image_from_relationship(server, 'image', ctx) - self.assertEqual({'image': 'test-id'}, server) - - @mock.patch('glance_plugin.image.' 
- 'get_openstack_ids_of_connected_nodes_by_openstack_type', - autospec=True, return_value=[]) - def test_handle_boot_image_no_image(self, *_): - server = {} - ctx = mock.MagicMock() - nova_plugin.server.handle_image_from_relationship(server, 'image', ctx) - self.assertNotIn('image', server) - - -@mock.patch('openstack_plugin_common.OpenStackClient._validate_auth_params') -class TestServerSGAttachments(unittest.TestCase): - SecurityGroup = collections.namedtuple( - 'SecurityGroup', ['id', 'name'], verbose=True) - - def setUp(self): - ctx = MockCloudifyContext( - target=MockContext({ - 'instance': MockNodeInstanceContext( - 'sg1', { - OPENSTACK_ID_PROPERTY: 'test-sg', - OPENSTACK_NAME_PROPERTY: 'test-sg-name' - }) - }), - source=MockContext({ - 'node': mock.MagicMock(), - 'instance': MockNodeInstanceContext( - 'server', { - OPENSTACK_ID_PROPERTY: 'server' - } - )}) - ) - - current_ctx.set(ctx) - self.addCleanup(current_ctx.clear) - findctx = mock.patch( - 'openstack_plugin_common._find_context_in_kw', - return_value=ctx, - ) - findctx.start() - self.addCleanup(findctx.stop) - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - def test_detach_already_detached(self, client, *kwargs): - server = client.return_value.servers.get.return_value - server.remove_security_group.side_effect = \ - nova_exceptions.NotFound('test') - nova_plugin.server.disconnect_security_group() - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - def test_connect_not_connected(self, client, *kwargs): - security_groups = [self.SecurityGroup('test-sg-2', 'test-sg-2-name')] - server = client.return_value.servers.get.return_value - server.list_security_group.return_value = security_groups - server.add_security_group.side_effect = ( - lambda _: security_groups.append( - self.SecurityGroup('test-sg', 'test-sg-name'))) - nova_plugin.server.connect_security_group() - server.add_security_group.assert_called_once_with('test-sg-name') - - @mock.patch('openstack_plugin_common.NovaClientWithSugar') - def test_connect_already_connected(self, client, *kwargs): - security_groups = [self.SecurityGroup('test-sg', 'test-sg-name'), - self.SecurityGroup('test-sg-2', 'test-sg-2-name')] - server = client.return_value.servers.get.return_value - server.list_security_group.return_value = security_groups - nova_plugin.server.connect_security_group() - server.add_security_group.assert_not_called() - - -class TestServerRelationships(unittest.TestCase): - - def _get_ctx_mock(self, instance_id, boot): - rel_specs = [MockRelationshipContext( - target=MockRelationshipSubjectContext(node=MockNodeContext( - properties={'boot': boot}), instance=MockNodeInstanceContext( - runtime_properties={ - OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE, - OPENSTACK_ID_PROPERTY: instance_id, - VOLUME_BOOTABLE: False - })))] - ctx = mock.MagicMock() - ctx.instance = MockNodeInstanceContext(relationships=rel_specs) - ctx.logger = setup_logger('mock-logger') - return ctx - - def test_boot_volume_relationship(self): - instance_id = 'test-id' - ctx = self._get_ctx_mock(instance_id, True) - rel_target = ctx.instance.relationships[0].target - rel_target.instance.runtime_properties[VOLUME_BOOTABLE] = True - result = nova_plugin.server._get_boot_volume_relationships( - VOLUME_OPENSTACK_TYPE, ctx) - self.assertEqual( - instance_id, - result.runtime_properties['external_id']) - - def test_no_boot_volume_relationship(self): - instance_id = 'test-id' - ctx = self._get_ctx_mock(instance_id, False) - result = nova_plugin.server._get_boot_volume_relationships( - 
VOLUME_OPENSTACK_TYPE, ctx) - self.assertFalse(result) - - -class TestServerNetworkRuntimeProperties(unittest.TestCase): - - @property - def mock_ctx(self): - return MockCloudifyContext( - node_id='test', - deployment_id='test', - properties={}, - operation={'retry_number': 0}, - provider_context={'resources': {}} - ) - - def test_server_networks_runtime_properties_empty_server(self): - ctx = self.mock_ctx - current_ctx.set(ctx=ctx) - server = mock.MagicMock() - setattr(server, 'networks', {}) - with self.assertRaisesRegexp( - NonRecoverableError, - 'The server was created but not attached to a network.'): - nova_plugin.server._set_network_and_ip_runtime_properties(server) - - def test_server_networks_runtime_properties_valid_networks(self): - ctx = self.mock_ctx - current_ctx.set(ctx=ctx) - server = mock.MagicMock() - network_id = 'management_network' - network_ips = ['good', 'bad1', 'bad2'] - setattr(server, - 'networks', - {network_id: network_ips}) - nova_plugin.server._set_network_and_ip_runtime_properties(server) - self.assertIn('networks', ctx.instance.runtime_properties.keys()) - self.assertIn('ip', ctx.instance.runtime_properties.keys()) - self.assertEquals(ctx.instance.runtime_properties['ip'], 'good') - self.assertEquals(ctx.instance.runtime_properties['networks'], - {network_id: network_ips}) - self.assertIn('ipv4_address', ctx.instance.runtime_properties) - self.assertIn('ipv4_addresses', ctx.instance.runtime_properties) - self.assertIn('ipv6_address', ctx.instance.runtime_properties) - self.assertIn('ipv6_addresses', ctx.instance.runtime_properties) - - def test_server_networks_runtime_properties_valid_networks_no_mgmt(self): - ctx = self.mock_ctx - current_ctx.set(ctx=ctx) - server = mock.MagicMock() - network_id = None - network_ips = ['good', 'bad1', 'bad2'] - setattr(server, - 'networks', - {network_id: network_ips}) - nova_plugin.server._set_network_and_ip_runtime_properties(server) - self.assertIn('networks', ctx.instance.runtime_properties.keys()) - self.assertIn('ip', ctx.instance.runtime_properties.keys()) - self.assertEquals(ctx.instance.runtime_properties['ip'], 'good') - self.assertEquals(ctx.instance.runtime_properties['networks'], - {network_id: network_ips}) - - def test_server_networks_runtime_properties_empty_networks(self): - ctx = self.mock_ctx - current_ctx.set(ctx=ctx) - server = mock.MagicMock() - network_id = 'management_network' - network_ips = [] - setattr(server, - 'networks', - {network_id: network_ips}) - nova_plugin.server._set_network_and_ip_runtime_properties(server) - self.assertIn('networks', ctx.instance.runtime_properties.keys()) - self.assertIn('ip', ctx.instance.runtime_properties.keys()) - self.assertEquals(ctx.instance.runtime_properties['ip'], None) - self.assertEquals(ctx.instance.runtime_properties['networks'], - {network_id: network_ips}) diff --git a/nova_plugin/tests/test_server_image_and_flavor.py b/nova_plugin/tests/test_server_image_and_flavor.py deleted file mode 100644 index 2ae47584..00000000 --- a/nova_plugin/tests/test_server_image_and_flavor.py +++ /dev/null @@ -1,228 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - - -import unittest - -import mock -from novaclient import exceptions as nova_exceptions - -import nova_plugin.server as server -from cloudify.exceptions import NonRecoverableError -from cloudify.mocks import MockCloudifyContext - - -class TestServerImageAndFlavor(unittest.TestCase): - - def test_no_image_and_no_flavor(self): - node_props = { - 'image': '', - 'flavor': '' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - self.assertRaises(NonRecoverableError, - server._handle_image_or_flavor, - serv, nova_client, 'image') - self.assertRaises(NonRecoverableError, - server._handle_image_or_flavor, - serv, nova_client, 'flavor') - - def test_image_and_flavor_properties_as_names(self): - node_props = { - 'image': 'some-image-name', - 'flavor': 'some-flavor-name' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertEquals('some-flavor-id', serv.get('flavor')) - - def test_image_and_flavor_properties_as_ids(self): - node_props = { - 'image': 'some-image-id', - 'flavor': 'some-flavor-id' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertEquals('some-flavor-id', serv.get('flavor')) - - def test_image_id_and_flavor_id(self): - node_props = { - 'image': '', - 'flavor': '' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image'] = 'some-image-id' - serv['flavor'] = 'some-flavor-id' - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertEquals('some-flavor-id', serv.get('flavor')) - - def test_image_name_and_flavor_name(self): - node_props = { - 'image': '', - 'flavor': '' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image_name'] = 'some-image-name' - serv['flavor_name'] = 'some-flavor-name' - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertNotIn('image_name', serv) - self.assertEquals('some-flavor-id', serv.get('flavor')) - self.assertNotIn('flavor_name', serv) - - def test_unknown_image_name_and_flavor_name(self): - node_props = { - 'image': 
'', - 'flavor': '' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image_name'] = 'some-unknown-image-name' - serv['flavor_name'] = 'some-unknown-flavor-name' - - self.assertRaises(nova_exceptions.NotFound, - server._handle_image_or_flavor, - serv, nova_client, 'image') - self.assertRaises(nova_exceptions.NotFound, - server._handle_image_or_flavor, - serv, nova_client, 'flavor') - - def test_image_id_and_flavor_id_override_on_properties(self): - node_props = { - 'image': 'properties-image-id', - 'flavor': 'properties-flavor-id' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image'] = 'some-image-id' - serv['flavor'] = 'some-flavor-id' - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertEquals('some-flavor-id', serv.get('flavor')) - - def test_image_name_and_flavor_name_override_on_properties(self): - node_props = { - 'image': 'properties-image-id', - 'flavor': 'properties-flavor-id' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image_name'] = 'some-image-name' - serv['flavor_name'] = 'some-flavor-name' - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertNotIn('image_name', serv) - self.assertEquals('some-flavor-id', serv.get('flavor')) - self.assertNotIn('flavor_name', serv) - - def test_image_name_and_flavor_name_override_on_image_and_flavor_ids(self): - node_props = { - 'image': '', - 'flavor': '' - } - with mock.patch('nova_plugin.server.ctx', - self._get_mock_ctx_with_node_properties(node_props)): - nova_client = self._get_mocked_nova_client() - - serv = {} - serv['image'] = 'some-bad-image-id' - serv['image_name'] = 'some-image-name' - serv['flavor'] = 'some-bad-flavor-id' - serv['flavor_name'] = 'some-flavor-name' - server._handle_image_or_flavor(serv, nova_client, 'image') - server._handle_image_or_flavor(serv, nova_client, 'flavor') - - self.assertEquals('some-image-id', serv.get('image')) - self.assertNotIn('image_name', serv) - self.assertEquals('some-flavor-id', serv.get('flavor')) - self.assertNotIn('flavor_name', serv) - - @staticmethod - def _get_mocked_nova_client(): - nova_client = mock.MagicMock() - - def mock_get_if_exists(prop_name, **kwargs): - is_image = prop_name == 'image' - searched_name = kwargs.get('name') - if (is_image and searched_name == 'some-image-name') or \ - (not is_image and searched_name == 'some-flavor-name'): - result = mock.MagicMock() - result.id = 'some-image-id' if \ - is_image else 'some-flavor-id' - return result - return [] - - def mock_find_generator(prop_name): - def mock_find(**kwargs): - result = mock_get_if_exists(prop_name, **kwargs) - if not result: - raise nova_exceptions.NotFound(404) - return result - return mock_find - - nova_client.cosmo_plural = lambda x: '{0}s'.format(x) - nova_client.cosmo_get_if_exists = mock_get_if_exists - nova_client.images.find = mock_find_generator('image') - nova_client.flavors.find = mock_find_generator('flavor') - return nova_client - - @staticmethod - 
def _get_mock_ctx_with_node_properties(properties): - return MockCloudifyContext(node_id='test_node_id', - properties=properties) diff --git a/nova_plugin/tests/test_userdata.py b/nova_plugin/tests/test_userdata.py deleted file mode 100644 index d7f056d7..00000000 --- a/nova_plugin/tests/test_userdata.py +++ /dev/null @@ -1,63 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import unittest - -import mock - -from cloudify.mocks import MockCloudifyContext - -from nova_plugin import userdata - - -def ctx_mock(): - result = MockCloudifyContext( - node_id='d', - properties={}) - result.node.type_hierarchy = ['cloudify.nodes.Compute'] - return result - - -class TestServerUserdataHandling(unittest.TestCase): - - @mock.patch('nova_plugin.userdata.ctx', ctx_mock()) - def test_no_userdata(self): - server_conf = {} - userdata.handle_userdata(server_conf) - self.assertEqual(server_conf, {}) - - def test_agent_installation_userdata(self): - ctx = ctx_mock() - ctx.agent.init_script = lambda: 'SCRIPT' - with mock.patch('nova_plugin.userdata.ctx', ctx): - server_conf = {} - userdata.handle_userdata(server_conf) - self.assertEqual(server_conf, {'userdata': 'SCRIPT'}) - - @mock.patch('nova_plugin.userdata.ctx', ctx_mock()) - def test_existing_userdata(self): - server_conf = {'userdata': 'EXISTING'} - server_conf_copy = server_conf.copy() - userdata.handle_userdata(server_conf) - self.assertEqual(server_conf, server_conf_copy) - - def test_existing_and_agent_installation_userdata(self): - ctx = ctx_mock() - ctx.agent.init_script = lambda: '#! SCRIPT' - with mock.patch('nova_plugin.userdata.ctx', ctx): - server_conf = {'userdata': '#! EXISTING'} - userdata.handle_userdata(server_conf) - self.assertTrue(server_conf['userdata'].startswith( - 'Content-Type: multi')) diff --git a/nova_plugin/tests/test_validation.py b/nova_plugin/tests/test_validation.py deleted file mode 100644 index aa1dfdd8..00000000 --- a/nova_plugin/tests/test_validation.py +++ /dev/null @@ -1,194 +0,0 @@ -######### -# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
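# The removed test_server_image_and_flavor.py above pins down the resolution
# order the old ``_handle_image_or_flavor`` helper followed: an explicit
# ``image_name``/``flavor_name`` entry wins and is resolved to an id, an
# ``image``/``flavor`` id already present in the server dict is kept, and
# otherwise the node property is used, either as an id or as a name to look
# up. A sketch of that order, assuming a novaclient-style client; the
# function name and the fallback details are illustrative, not the deleted
# implementation itself:

from cloudify.exceptions import NonRecoverableError
from novaclient import exceptions as nova_exceptions


def _resolve_image_or_flavor(server, nova_client, prop_name, node_property):
    name_key = '{0}_name'.format(prop_name)            # e.g. 'image_name'
    finder = getattr(nova_client, '{0}s'.format(prop_name)).find

    if server.get(name_key):
        # An explicit *_name overrides any id already in the dict;
        # novaclient raises NotFound for unknown names.
        server[prop_name] = finder(name=server.pop(name_key)).id
    elif server.get(prop_name):
        # An id supplied directly in the server dict is kept as-is.
        pass
    elif node_property:
        # The node property may hold either a name or an id.
        try:
            server[prop_name] = finder(name=node_property).id
        except nova_exceptions.NotFound:
            server[prop_name] = node_property
    else:
        raise NonRecoverableError(
            'No {0} was provided for the server'.format(prop_name))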
- -import os -from os import path -import tempfile -import shutil - -import unittest -import mock - -from cloudify.test_utils import workflow_test -from nova_plugin.keypair import creation_validation -from cloudify.exceptions import NonRecoverableError - -PRIVATE_KEY_NAME = 'private_key' - - -class TestValidation(unittest.TestCase): - - blueprint_path = path.join('resources', - 'test-keypair-validation-blueprint.yaml') - - def setUp(self): - _, fp = tempfile.mkstemp() - self.private_key = fp - _, fp = tempfile.mkstemp() - self.not_readable_private_key = fp - os.chmod(self.not_readable_private_key, 0o200) - self.temp_dir = tempfile.mkdtemp() - self.not_writable_temp_dir_r = tempfile.mkdtemp() - os.chmod(self.not_writable_temp_dir_r, 0o400) - self.not_writable_temp_dir_rx = tempfile.mkdtemp() - os.chmod(self.not_writable_temp_dir_rx, 0o500) - self.not_writable_temp_dir_rw = tempfile.mkdtemp() - os.chmod(self.not_writable_temp_dir_rw, 0o600) - - def tearDown(self): - if self.private_key: - os.remove(self.private_key) - - if self.not_readable_private_key: - os.remove(self.not_readable_private_key) - - shutil.rmtree(self.not_writable_temp_dir_r, ignore_errors=True) - shutil.rmtree(self.not_writable_temp_dir_rx, ignore_errors=True) - shutil.rmtree(self.not_writable_temp_dir_rw, ignore_errors=True) - shutil.rmtree(self.temp_dir, ignore_errors=True) - - def new_keypair_create(self, *args, **kwargs): - creation_validation(*args, **kwargs) - - def new_keypair_create_with_exception(self, *args, **kwargs): - self.assertRaises(NonRecoverableError, creation_validation, - *args, **kwargs) - - def get_keypair_inputs_private_key(self, is_external, **kwargs): - return { - 'private_key': self.private_key, - 'is_keypair_external': is_external - } - - def get_keypair_inputs_not_readable_private_key(self, - is_external, **kwargs): - return { - 'private_key': self.not_readable_private_key, - 'is_keypair_external': is_external - } - - def get_keypair_inputs_not_writable_dir_r(self, is_external, **kwargs): - return { - 'private_key': path.join(self.not_writable_temp_dir_r, - PRIVATE_KEY_NAME), - 'is_keypair_external': is_external - } - - def get_keypair_inputs_not_writable_dir_rx(self, is_external, **kwargs): - return { - 'private_key': path.join(self.not_writable_temp_dir_rx, - PRIVATE_KEY_NAME), - 'is_keypair_external': is_external - } - - def get_keypair_inputs_not_writable_dir_rw(self, is_external, **kwargs): - return { - 'private_key': path.join(self.not_writable_temp_dir_rw, - PRIVATE_KEY_NAME), - 'is_keypair_external': is_external - } - - def get_keypair_inputs_temp_dir(self, is_external, **kwargs): - return { - 'private_key': path.join(self.temp_dir, PRIVATE_KEY_NAME), - 'is_keypair_external': is_external - } - - @workflow_test(blueprint_path, inputs={ - 'private_key': '', - 'is_keypair_external': False - }) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_valid_config(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key', - input_func_kwargs={'is_external': True}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_valid_config_external(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir', - 
input_func_kwargs={'is_external': True}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_no_private_key(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, inputs='get_keypair_inputs_private_key', - input_func_kwargs={'is_external': False}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_local_and_exists(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, inputs='get_keypair_inputs_temp_dir', - input_func_kwargs={'is_external': False}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_local_temp_dir(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, - inputs='get_keypair_inputs_not_writable_dir_r', - input_func_kwargs={'is_external': False}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_local_non_writable_dir_r(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, - inputs='get_keypair_inputs_not_writable_dir_rx', - input_func_kwargs={'is_external': False}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_local_non_writable_dir_rx(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, - inputs='get_keypair_inputs_not_writable_dir_rw', - input_func_kwargs={'is_external': False}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_local_non_writable_dir_rw(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) - - @workflow_test(blueprint_path, - inputs='get_keypair_inputs_not_readable_private_key', - input_func_kwargs={'is_external': True}) - @mock.patch('nova_plugin.keypair.validate_resource') - def test_keypair_not_readable_private_key(self, cfy_local, *args): - - with mock.patch('nova_plugin.keypair.create', - new=self.new_keypair_create_with_exception): - cfy_local.execute('install', task_retries=0) diff --git a/nova_plugin/userdata.py b/nova_plugin/userdata.py deleted file mode 100644 index ba63bb53..00000000 --- a/nova_plugin/userdata.py +++ /dev/null @@ -1,50 +0,0 @@ -######### -# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
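# The keypair validation tests above document the filesystem checks the old
# ``nova_plugin.keypair.creation_validation`` performed on the
# ``private_key`` path: an externally managed keypair must point at an
# existing, readable key file, while a locally created keypair must point at
# a path that does not exist yet, inside a directory the agent can write to.
# A sketch of those checks; the function name and error messages are
# illustrative, not the deleted implementation:

import os

from cloudify.exceptions import NonRecoverableError


def _validate_private_key_path(private_key_path, is_keypair_external):
    if is_keypair_external:
        if not os.path.isfile(private_key_path) or \
                not os.access(private_key_path, os.R_OK):
            raise NonRecoverableError(
                'Expected an existing, readable private key file at '
                '{0}'.format(private_key_path))
    else:
        if os.path.exists(private_key_path):
            raise NonRecoverableError(
                'Private key {0} already exists'.format(private_key_path))
        key_directory = os.path.dirname(private_key_path) or '.'
        if not os.access(key_directory, os.W_OK | os.X_OK):
            raise NonRecoverableError(
                'Directory {0} is not writable'.format(key_directory))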
- -import requests - -from cloudify import compute -from cloudify import exceptions -from cloudify import ctx - - -def handle_userdata(server): - - existing_userdata = server.get('userdata') - install_agent_userdata = ctx.agent.init_script() - - if not (existing_userdata or install_agent_userdata): - return - - if isinstance(existing_userdata, dict): - ud_type = existing_userdata['type'] - if ud_type not in userdata_handlers: - raise exceptions.NonRecoverableError( - "Invalid type '{0}' for server userdata)".format(ud_type)) - existing_userdata = userdata_handlers[ud_type](existing_userdata) - - if not existing_userdata: - final_userdata = install_agent_userdata - elif not install_agent_userdata: - final_userdata = existing_userdata - else: - final_userdata = compute.create_multi_mimetype_userdata( - [existing_userdata, install_agent_userdata]) - server['userdata'] = final_userdata - - -userdata_handlers = { - 'http': lambda params: requests.get(params['url']).text -} diff --git a/manager_tests/__init__.py b/openstack_plugin/__init__.py similarity index 100% rename from manager_tests/__init__.py rename to openstack_plugin/__init__.py diff --git a/openstack_plugin/constants.py b/openstack_plugin/constants.py new file mode 100644 index 00000000..1e061f75 --- /dev/null +++ b/openstack_plugin/constants.py @@ -0,0 +1,143 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Runtime properties keys +RESOURCE_ID = 'id' +OPENSTACK_TYPE_PROPERTY = 'type' +OPENSTACK_NAME_PROPERTY = 'name' +OPENSTACK_AZ_PROPERTY = 'availability_zone' +USE_EXTERNAL_RESOURCE_PROPERTY = 'use_external_resource' +SERVER_TASK_CREATE = 'create_server_task' +SERVER_TASK_STOP = 'stop_server_task' +SERVER_TASK_DELETE = 'delete_server_task' +SERVER_TASK_START = 'start_server_task' +SERVER_TASK_STATE = 'task_state' +SERVER_TASK_BACKUP_DONE = 'backup_done' +SERVER_TASK_RESTORE_STATE = 'restore_state' +SERVER_INTERFACE_IDS = 'interfaces' +VOLUME_TASK_DELETE = 'delete_volume_task' +VOLUME_ATTACHMENT_TASK = 'attach_volume_task' +VOLUME_DETACHMENT_TASK = 'detach_volume_task' +VOLUME_BACKUP_TASK = 'backup_volume_task' +VOLUME_SNAPSHOT_TASK = 'snapshot_volume_task' +VOLUME_SNAPSHOT_ID = 'snapshot_id' +VOLUME_BACKUP_ID = 'backup_id' +VOLUME_ATTACHMENT_ID = 'attachment_id' + +# Openstack Server status constants. 
+# Full lists here: https://bit.ly/2UyB5V5 # NOQA +SERVER_STATUS_ACTIVE = 'ACTIVE' +SERVER_STATUS_BUILD = 'BUILD' +SERVER_STATUS_SHUTOFF = 'SHUTOFF' +SERVER_STATUS_SUSPENDED = 'SUSPENDED' +SERVER_STATUS_ERROR = 'ERROR' +SERVER_STATUS_REBOOT = 'REBOOT' +SERVER_STATUS_HARD_REBOOT = 'HARD_REBOOT' +SERVER_STATUS_UNKNOWN = 'UNKNOWN' + +# Openstack volume attachment status constants +VOLUME_STATUS_CREATING = 'creating' +VOLUME_STATUS_DELETING = 'deleting' +VOLUME_STATUS_AVAILABLE = 'available' +VOLUME_STATUS_IN_USE = 'in-use' +VOLUME_STATUS_ERROR = 'error' +VOLUME_STATUS_ERROR_DELETING = 'error_deleting' +VOLUME_ERROR_STATUSES = (VOLUME_STATUS_ERROR, VOLUME_STATUS_ERROR_DELETING) + +# Openstack Server reboot actions +SERVER_REBOOT_SOFT = 'SOFT' +SERVER_REBOOT_HARD = 'HARD' + +# Openstack resources types +SERVER_OPENSTACK_TYPE = 'server' +SERVER_GROUP_OPENSTACK_TYPE = 'server_group' +INSTANCE_OPENSTACK_TYPE = 'instance' +HOST_AGGREGATE_OPENSTACK_TYPE = 'aggregate' +IMAGE_OPENSTACK_TYPE = 'image' +FLAVOR_OPENSTACK_TYPE = 'flavor' +KEYPAIR_OPENSTACK_TYPE = 'key_pair' +USER_OPENSTACK_TYPE = 'user' +PROJECT_OPENSTACK_TYPE = 'project' +NETWORK_OPENSTACK_TYPE = 'network' +SUBNET_OPENSTACK_TYPE = 'subnet' +ROUTER_OPENSTACK_TYPE = 'router' +PORT_OPENSTACK_TYPE = 'port' +FLOATING_IP_OPENSTACK_TYPE = 'ip' +SECURITY_GROUP_OPENSTACK_TYPE = 'security_group' +SECURITY_GROUP_RULE_OPENSTACK_TYPE = 'security_group_rule' +RBAC_POLICY_OPENSTACK_TYPE = 'rbac_policy' +QOS_POLICY_OPENSTACK_TYPE = 'policy' +VOLUME_OPENSTACK_TYPE = 'volume' +VOLUME_BACKUP_OPENSTACK_TYPE = 'backup' +VOLUME_SNAPSHOT_OPENSTACK_TYPE = 'snapshot' +VOLUME_TYPE_OPENSTACK_TYPE = 'volume_type' + +# Openstack Image status +IMAGE_UPLOADING = 'image_uploading' +IMAGE_UPLOADING_PENDING = 'image_pending_upload' +IMAGE_STATUS_ACTIVE = 'active' + +# Cloudify node types +SERVER_GROUP_NODE_TYPE = 'cloudify.nodes.openstack.ServerGroup' +KEYPAIR_NODE_TYPE = 'cloudify.nodes.openstack.KeyPair' +IMAGE_NODE_TYPE = 'cloudify.nodes.openstack.Image' +NETWORK_NODE_TYPE = 'cloudify.nodes.openstack.Network' +PORT_NODE_TYPE = 'cloudify.nodes.openstack.Port' +SUBNET_NODE_TYPE = 'cloudify.nodes.openstack.Subnet' +VOLUME_NODE_TYPE = 'cloudify.nodes.openstack.Volume' +SECURITY_GROUP_NODE_TYPE = 'cloudify.nodes.openstack.SecurityGroup' + +# Cloudify relationship types +RBAC_POLICY_RELATIONSHIP_TYPE = \ + 'cloudify.relationships.openstack.rbac_policy_applied_to' + + +# Message constants +QUOTA_VALID_MSG = \ + 'OK: {0} (node {1}) can be created. provisioned {2}: {3}, quota: {4}' + +QUOTA_INVALID_MSG = \ + '{0} (node {1}) cannot be created due to quota limitations.' 
\ + 'provisioned {2}: {3}, quota: {4}' + +# General constants +OPENSTACK_RESOURCE_UUID = 'uuid' +OPENSTACK_PORT_ID = 'port_id' +OPENSTACK_NETWORK_ID = 'net_id' +PS_OPEN = '' +PS_CLOSE = '' +INFINITE_RESOURCE_QUOTA = -1 +SERVER_ACTION_STATUS_DONE = 'DONE' +SERVER_ACTION_STATUS_PENDING = 'PENDING' +SERVER_REBUILD_STATUS = 'rebuild_done' +SERVER_REBUILD_SPAWNING_STATUS = 'rebuild_spawning' +SERVER_ADMIN_PASSWORD = 'password' +IDENTITY_USERS = 'users' +IDENTITY_ROLES = 'roles' +IDENTITY_QUOTA = 'quota' +VOLUME_BOOTABLE = 'bootable' +VOLUME_DEVICE_NAME_PROPERTY = 'device_name' +CLOUDIFY_CREATE_OPERATION = 'cloudify.interfaces.lifecycle.create' +CLOUDIFY_CONFIGURE_OPERATION = 'cloudify.interfaces.lifecycle.configure' +CLOUDIFY_START_OPERATION = 'cloudify.interfaces.lifecycle.start' +CLOUDIFY_STOP_OPERATION = 'cloudify.interfaces.lifecycle.stop' +CLOUDIFY_DELETE_OPERATION = 'cloudify.interfaces.lifecycle.delete' +CLOUDIFY_CREATE_VALIDATION = 'cloudify.interfaces.validation.creation' +CLOUDIFY_NEW_NODE_OPERATIONS = [CLOUDIFY_CREATE_OPERATION, + CLOUDIFY_CONFIGURE_OPERATION, + CLOUDIFY_START_OPERATION, + CLOUDIFY_STOP_OPERATION, + CLOUDIFY_DELETE_OPERATION, + CLOUDIFY_CREATE_VALIDATION] diff --git a/openstack_plugin/decorators.py b/openstack_plugin/decorators.py new file mode 100644 index 00000000..bc8ed0d7 --- /dev/null +++ b/openstack_plugin/decorators.py @@ -0,0 +1,99 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
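+
+# Usage sketch (illustrative only; it mirrors how the resource modules added
+# later in this change, e.g. resources/compute/flavor.py, consume the
+# decorator defined below):
+#
+#     @with_openstack_resource(OpenstackFlavor)
+#     def create(openstack_resource):
+#         created_resource = openstack_resource.create()
+#         ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id
+#
+# The wrapper resolves the operation context, instantiates the given SDK
+# resource class and injects it as the ``openstack_resource`` argument.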
+ +# Standard Imports +import sys + +# Third party imports +from openstack import exceptions +from cloudify import ctx as CloudifyContext +from cloudify.exceptions import NonRecoverableError +from cloudify.utils import exception_to_error_cause + +# Local imports +from openstack_plugin.constants import USE_EXTERNAL_RESOURCE_PROPERTY +from openstack_plugin.utils \ + import (resolve_ctx, + get_current_operation, + prepare_resource_instance, + handle_external_resource, + update_runtime_properties_for_operation_task, + allow_to_run_operation_for_external_node) + + +def with_openstack_resource(class_decl, + existing_resource_handler=None, + **existing_resource_kwargs): + """ + :param class_decl: This is a class for the openstack resource need to be + invoked + :param existing_resource_handler: This is a method that handle any + custom operation need to be done in case "use_external_resource" is set + to true + :param existing_resource_kwargs: This is an extra param that we may need + to pass to the external resource handler + :return: a wrapper object encapsulating the invoked function + """ + + def wrapper_outer(func): + def wrapper_inner(**kwargs): + # Get the context for the current task operation + ctx = kwargs.pop('ctx', CloudifyContext) + + # Resolve the actual context which need to run operation, + # the context could be belongs to relationship context or actual + # node context + ctx_node = resolve_ctx(ctx) + + # Get the current operation name + operation_name = get_current_operation() + + # Prepare the openstack resource that need to execute the + # current task operation + resource = \ + prepare_resource_instance(class_decl, ctx_node, kwargs) + + # Handle external resource when it is enabled + if ctx_node.node.properties.get(USE_EXTERNAL_RESOURCE_PROPERTY): + handle_external_resource(ctx_node, + resource, + existing_resource_handler, + **existing_resource_kwargs) + + # Update runtime properties + if not allow_to_run_operation_for_external_node( + operation_name): + # Update runtime properties for operation + update_runtime_properties_for_operation_task( + operation_name, + ctx_node, + resource) + + return + try: + kwargs['openstack_resource'] = resource + func(**kwargs) + update_runtime_properties_for_operation_task(operation_name, + ctx_node, + resource) + except exceptions.SDKException as error: + _, _, tb = sys.exc_info() + raise NonRecoverableError( + 'Failure while trying to request ' + 'Openstack API: {}'.format(error.message), + causes=[exception_to_error_cause(error, tb)]) + + return wrapper_inner + return wrapper_outer diff --git a/nova_plugin/tests/__init__.py b/openstack_plugin/resources/__init__.py similarity index 100% rename from nova_plugin/tests/__init__.py rename to openstack_plugin/resources/__init__.py diff --git a/openstack_plugin_common/tests/__init__.py b/openstack_plugin/resources/compute/__init__.py similarity index 100% rename from openstack_plugin_common/tests/__init__.py rename to openstack_plugin/resources/compute/__init__.py diff --git a/openstack_plugin/resources/compute/flavor.py b/openstack_plugin/resources/compute/flavor.py new file mode 100644 index 00000000..7b72e822 --- /dev/null +++ b/openstack_plugin/resources/compute/flavor.py @@ -0,0 +1,74 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.compute import OpenstackFlavor +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, FLAVOR_OPENSTACK_TYPE) +from openstack_plugin.utils import add_resource_list_to_runtime_properties + + +@with_openstack_resource(OpenstackFlavor) +def create(openstack_resource): + """ + Create openstack flavor + :param openstack_resource: Instance of openstack flavor resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackFlavor) +def list_flavors(openstack_resource, query=None, details=True): + """ + + :param openstack_resource: + :param query: + :param details: + :return: + """ + flavors = openstack_resource.list(details=details, query=query) + add_resource_list_to_runtime_properties(FLAVOR_OPENSTACK_TYPE, flavors) + + +@with_openstack_resource(OpenstackFlavor) +def delete(openstack_resource): + """ + Delete flavor resource + :param openstack_resource: Instance of openstack flavor resource. + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackFlavor) +def update(openstack_resource, args): + """ + Update openstack flavor by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack flavor resource + :param args: dict of information need to be updated + """ + # TODO This need to be uncomment whenever openstack allow for update + # operation since the following actions are only supported + # https://git.io/fh93b + # args = reset_dict_empty_keys(args) + # openstack_resource.update(args) + raise NonRecoverableError( + 'Openstack SDK does not support flavor update operation') diff --git a/openstack_plugin/resources/compute/host_aggregate.py b/openstack_plugin/resources/compute/host_aggregate.py new file mode 100644 index 00000000..45640a12 --- /dev/null +++ b/openstack_plugin/resources/compute/host_aggregate.py @@ -0,0 +1,127 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
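+
+# Illustrative inputs consumed by the operations below (example values only,
+# not taken from a real blueprint):
+#
+#     metadata:        # set_metadata() coerces non-string values to strings
+#       ssd: "true"
+#       rack: "42"
+#
+#     hosts:           # add_hosts()/remove_hosts() expect a list of host names
+#       - compute-1
+#       - compute-2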
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.compute import OpenstackHostAggregate +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + HOST_AGGREGATE_OPENSTACK_TYPE) +from openstack_plugin.utils import add_resource_list_to_runtime_properties + + +@with_openstack_resource(OpenstackHostAggregate) +def create(openstack_resource): + """ + Create openstack host aggregate instance + :param openstack_resource: Instance of openstack host aggregate resource + """ + # First create host aggregate instance using the configuration provided + # by users when create cloudify node + created_resource = openstack_resource.create() + + # Set the "id" as a runtime property for the created host aggregate + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackHostAggregate) +def set_metadata(openstack_resource): + """ + Configure host aggregate by adding metadata with created host aggregate + :param openstack_resource: Instance of openstack host aggregate resource + """ + + # Check to see if metadata is provided or not so that we can attach them + # to created host aggregate + if ctx.node.properties.get('metadata'): + # Metadata values should be in strong format + for key, value in ctx.node.properties['metadata'].iteritems(): + if not isinstance(value, basestring): + ctx.node.properties['metadata'][key] = unicode(value) + openstack_resource.set_metadata(ctx.node.properties['metadata']) + + +@with_openstack_resource(OpenstackHostAggregate) +def update(openstack_resource, args): + """ + Update openstack host aggregate by passing args dict that contains + the info that need to be updated + :param openstack_resource: Instance of openstack host aggregate resource + :param dict args: dict of information need to be updated + """ + # TODO This need to be uncomment whenever openstack allow for update + # operation since the following actions are only supported + # https://git.io/fhSFH + # args = reset_dict_empty_keys(args) + # openstack_resource.update(args) + raise NonRecoverableError( + 'Openstack SDK does not support host aggregate update operation') + + +@with_openstack_resource(OpenstackHostAggregate) +def list_aggregates(openstack_resource): + """ + List openstack host aggregate + :param openstack_resource: Instance of openstack host aggregate resource. + """ + aggregates = openstack_resource.list() + add_resource_list_to_runtime_properties(HOST_AGGREGATE_OPENSTACK_TYPE, + aggregates) + + +@with_openstack_resource(OpenstackHostAggregate) +def delete(openstack_resource): + """ + Delete host aggregate resource + :param openstack_resource: Instance of openstack host aggregate resource. + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackHostAggregate) +def add_hosts(openstack_resource, hosts): + """ + Add hosts to an aggregate + :param openstack_resource: Instance of openstack host aggregate resource. 
+ :param list hosts: List of host strings that should be added to the host + aggregate resource + """ + if isinstance(hosts, list): + for host in hosts: + # Add host to the target host aggregate + openstack_resource.add_host(host) + else: + raise NonRecoverableError( + 'invalid data type {0} for hosts'.format(type(hosts))) + + +@with_openstack_resource(OpenstackHostAggregate) +def remove_hosts(openstack_resource, hosts): + """ + Remove hosts from an aggregate + :param openstack_resource: Instance of openstack host aggregate resource. + :param list hosts: List of host strings that should be removed from host + aggregate resource + """ + if isinstance(hosts, list): + for host in hosts: + # Add host to the target host aggregate + openstack_resource.remove_host(host) + else: + raise NonRecoverableError( + 'invalid data type {0} for hosts'.format(type(hosts))) diff --git a/openstack_plugin/resources/compute/image.py b/openstack_plugin/resources/compute/image.py new file mode 100644 index 00000000..f3ae54c5 --- /dev/null +++ b/openstack_plugin/resources/compute/image.py @@ -0,0 +1,81 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.images import OpenstackImage +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, IMAGE_OPENSTACK_TYPE) +from openstack_plugin.utils import (validate_resource_quota, + reset_dict_empty_keys, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackImage) +def create(openstack_resource): + # TODO Need to handle image upload to openstack when image_url is + # specified even if it is local url or remote url + # image_url = ctx.node.properties.get('image_url') + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackImage) +def start(openstack_resource): + # TODO This should be implemented in order to check if uploading image + # is done or not + pass + + +@with_openstack_resource(OpenstackImage) +def delete(openstack_resource): + # Delete the image resource after lookup the resource_id values + openstack_resource.delete() + + +@with_openstack_resource(OpenstackImage) +def update(openstack_resource, args): + """ + Update openstack image by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack image resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackImage) +def list_images(openstack_resource, query=None): + """ + List openstack images based on filters applied + :param openstack_resource: Instance of current openstack image + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
+ """ + images = openstack_resource.list(query) + add_resource_list_to_runtime_properties(IMAGE_OPENSTACK_TYPE, images) + + +@with_openstack_resource(OpenstackImage) +def creation_validation(openstack_resource): + """ + This method is to check if we can create image resource in openstack + :param openstack_resource: Instance of current openstack image + """ + validate_resource_quota(openstack_resource, IMAGE_OPENSTACK_TYPE) + ctx.logger.debug('OK: image configuration is valid') diff --git a/openstack_plugin/resources/compute/keypair.py b/openstack_plugin/resources/compute/keypair.py new file mode 100644 index 00000000..9cf3a1ba --- /dev/null +++ b/openstack_plugin/resources/compute/keypair.py @@ -0,0 +1,68 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.compute import OpenstackKeyPair +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, KEYPAIR_OPENSTACK_TYPE) +from openstack_plugin.utils import (validate_resource_quota, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackKeyPair) +def create(openstack_resource): + """ + Create openstack keypair resource + :param openstack_resource: Instance of openstack keypair resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = \ + created_resource.id + ctx.instance.runtime_properties['private_key'] = \ + created_resource.private_key + ctx.instance.runtime_properties['public_key'] = \ + created_resource.public_key + + +@with_openstack_resource(OpenstackKeyPair) +def delete(openstack_resource): + """ + Delete current openstack keypair + :param openstack_resource: instance of openstack keypair resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackKeyPair) +def list_keypairs(openstack_resource): + """ + List openstack keypairs + :param openstack_resource: Instance of openstack keypair. + """ + keypairs = openstack_resource.list() + add_resource_list_to_runtime_properties(KEYPAIR_OPENSTACK_TYPE, keypairs) + + +@with_openstack_resource(OpenstackKeyPair) +def creation_validation(openstack_resource): + """ + This method is to check if we can create keypair resource in openstack + :param openstack_resource: Instance of current openstack keypair + """ + validate_resource_quota(openstack_resource, KEYPAIR_OPENSTACK_TYPE) + ctx.logger.debug('OK: key pair configuration is valid') diff --git a/openstack_plugin/resources/compute/server.py b/openstack_plugin/resources/compute/server.py new file mode 100644 index 00000000..4f81a409 --- /dev/null +++ b/openstack_plugin/resources/compute/server.py @@ -0,0 +1,1546 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. 
All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import json +from Crypto.PublicKey import RSA +from Crypto.Cipher import PKCS1_v1_5 +import base64 + +# Third party imports +from cloudify import ctx +from openstack import exceptions +from cloudify.exceptions import (OperationRetry, + NonRecoverableError) + +# Local imports +from openstack_sdk.resources.compute import OpenstackServer +from openstack_sdk.resources.compute import OpenstackKeyPair +from openstack_sdk.resources.images import OpenstackImage +from openstack_sdk.resources.volume import OpenstackVolume +from openstack_sdk.resources.networks import OpenstackPort +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_RESOURCE_UUID, + SERVER_STATUS_ACTIVE, + SERVER_STATUS_SHUTOFF, + SERVER_STATUS_REBOOT, + SERVER_STATUS_HARD_REBOOT, + SERVER_STATUS_UNKNOWN, + SERVER_STATUS_ERROR, + SERVER_TASK_DELETE, + SERVER_TASK_STOP, + SERVER_TASK_START, + SERVER_TASK_RESTORE_STATE, + SERVER_TASK_BACKUP_DONE, + SERVER_OPENSTACK_TYPE, + SERVER_GROUP_NODE_TYPE, + SERVER_REBOOT_HARD, + SERVER_REBOOT_SOFT, + SERVER_ACTION_STATUS_PENDING, + SERVER_ACTION_STATUS_DONE, + SERVER_REBUILD_SPAWNING_STATUS, + SERVER_REBUILD_STATUS, + SERVER_TASK_STATE, + SERVER_INTERFACE_IDS, + SERVER_ADMIN_PASSWORD, + IMAGE_UPLOADING_PENDING, + IMAGE_STATUS_ACTIVE, + IMAGE_UPLOADING, + INSTANCE_OPENSTACK_TYPE, + VOLUME_DEVICE_NAME_PROPERTY, + VOLUME_OPENSTACK_TYPE, + VOLUME_STATUS_IN_USE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES, + VOLUME_ATTACHMENT_TASK, + VOLUME_DETACHMENT_TASK, + VOLUME_ATTACHMENT_ID, + VOLUME_BOOTABLE, + KEYPAIR_NODE_TYPE, + PORT_OPENSTACK_TYPE, + KEYPAIR_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + OPENSTACK_PORT_ID, + OPENSTACK_NETWORK_ID, + OPENSTACK_TYPE_PROPERTY, + USE_EXTERNAL_RESOURCE_PROPERTY) + +from openstack_plugin.utils import \ + (handle_userdata, + validate_resource_quota, + wait_until_status, + add_resource_list_to_runtime_properties, + find_relationship_by_node_type, + find_openstack_ids_of_connected_nodes_by_openstack_type, + reset_dict_empty_keys, + get_resource_id_from_runtime_properties, + get_snapshot_name, + generate_attachment_volume_key, + assign_resource_payload_as_runtime_properties) + + +def _stop_server(server): + """ + Stop server instance + :param server: Instance of openstack resource (OpenstackServer) + """ + server_resource = server.get() + if server_resource.status != SERVER_STATUS_SHUTOFF: + # Trigger stop server API only if it is not stopped before + if SERVER_TASK_STOP not in ctx.instance.runtime_properties: + server.stop() + ctx.instance.runtime_properties[SERVER_TASK_STOP]\ + = SERVER_ACTION_STATUS_PENDING + + # Get the server instance to check the status of the server + server_resource = server.get() + if server_resource.status != SERVER_STATUS_SHUTOFF: + raise OperationRetry(message='Server has {} state.'.format( + server_resource.status), retry_after=30) + + else: + ctx.logger.info('Server 
{0} is already stopped' + ''.format(server.resource_id)) + ctx.instance.runtime_properties[SERVER_TASK_STOP] \ + = SERVER_ACTION_STATUS_DONE + else: + ctx.logger.info('Server {0} is already stopped' + ''.format(server.resource_id)) + ctx.instance.runtime_properties[SERVER_TASK_STOP]\ + = SERVER_ACTION_STATUS_DONE + + +def _start_server(server): + """ + Start server instance + :param server: Instance of openstack resource (OpenstackServer) + """ + server_resource = server.get() + if server_resource.status != SERVER_STATUS_ACTIVE: + # Trigger start server API only if it is not started before + if SERVER_TASK_START not in ctx.instance.runtime_properties: + server.start() + ctx.instance.runtime_properties[SERVER_TASK_START]\ + = SERVER_ACTION_STATUS_PENDING + + # Get the server instance to check the status of the server + server = server.get() + if server.status != SERVER_STATUS_ACTIVE: + raise OperationRetry(message='Server has {} state.'.format( + server.status), retry_after=30) + + else: + ctx.logger.info('Server is already started') + ctx.instance.runtime_properties[SERVER_TASK_START] \ + = SERVER_ACTION_STATUS_DONE + + else: + ctx.logger.info('Server is already started') + ctx.instance.runtime_properties[SERVER_TASK_START]\ + = SERVER_ACTION_STATUS_DONE + + +def _set_server_ips_runtime_properties(server): + """ + Populate required runtime properties from server in order to have all + the information related to ips + :param server: instance of openstack server + `~openstack.compute.v2.server.Server` + """ + addresses = server.addresses + if not addresses: + return None + + ipv4_addresses = [] + ipv6_addresses = [] + + for network, address_object in addresses.iteritems(): + for address in address_object: + # ip config + ipv4 = dict() + ipv4['addr'] = address['addr'] + ipv4['type'] = address['OS-EXT-IPS:type'] + + # Check where `ip_config` should be added + if address['version'] == 4: + ipv4_addresses.append(ipv4) + elif address['version'] == 6: + ipv6_addresses.append({'addr': address['addr'], + 'type': address['OS-EXT-IPS:type']}) + + # Check if access_ipv4 is set or not + if server.access_ipv4: + ctx.instance.runtime_properties['access_ipv4'] = server.access_ipv4 + + # Check if access_ipv6 is set or not + if server.access_ipv6: + ctx.instance.runtime_properties['access_ipv6'] = server.access_ipv6 + + # For each item in "ipv4_addresses", we need to check + # both private/public ip in order to set them as part of runtime_properties + for ipv4 in ipv4_addresses: + ip = ipv4['addr'] + + # Only set the first "ip" as runtime property + if ipv4['type'] == 'fixed'\ + and 'ip' not in ctx.instance.runtime_properties: + ctx.instance.runtime_properties['ip'] = ip + + # Only set the first "public_ip_address" as runtime property + elif ipv4['type'] == 'floating'\ + and 'public_ip_address' not in ctx.instance.runtime_properties: + ctx.instance.runtime_properties['public_ip_address'] = ip + + for ipv6 in ipv6_addresses: + ip_v6 = ipv6['addr'] + + # Only set the first "ipv6" as runtime property + if ipv6['type'] == 'fixed' \ + and 'ipv6' not in ctx.instance.runtime_properties: + ctx.instance.runtime_properties['ipv6'] = ip_v6 + + # Only set the first "public_ip6_address" as runtime property + elif ipv6['type'] == 'floating'\ + and 'public_ip6_address' not in\ + ctx.instance.runtime_properties: + ctx.instance.runtime_properties['public_ip6_address'] = ip_v6 + + # Check to see if "use_public_ip" is set or not in order to update the + # "ip" to use the public address + if ctx.node.properties.get('use_public_ip'): + pip =
ctx.instance.runtime_properties.get('public_ip_address') + if pip: + ctx.instance.runtime_properties['ip'] = pip + + elif ctx.node.properties.get('use_ipv6_ip', False) and ipv6_addresses: + ip_v6 = ctx.instance.runtime_properties['ipv6'] + ctx.instance.runtime_properties['ip'] = ip_v6 + + # Get list of all ipv4 associated with server + ipv4_list = map(lambda ipv4_conf: ipv4_conf['addr'], ipv4_addresses) + + # Get list of all ipv6 associated with server + ipv6_list = map(lambda ipv6_conf: ipv6_conf['addr'], ipv6_addresses) + + ctx.instance.runtime_properties['ipv4_addresses'] = ipv4_list + ctx.instance.runtime_properties['ipv6_addresses'] = ipv6_list + + +def _log_snapshot_message(resource_id, + snapshot_name, + snapshot_incremental): + """ + Log message for backup operation + :param str resource_id: Server resource id + :param str snapshot_name: Server snapshot name + :param bool snapshot_incremental: Flag to create an incremental snapshots + or full backup + """ + # Decide what is the backup type + backup_type = 'snapshot' if snapshot_incremental else 'backup' + + # Format message to be logged when applying this task + backup_msg = 'Apply {0} {1} for {2}' \ + ''.format(backup_type, snapshot_name, resource_id) + + # Log message when start the snapshot restore operation + ctx.logger.info(backup_msg) + + +def _handle_server_group(openstack_resource): + """ + Associate server with server group if it is provided via the + configuration in order to prepare and send them with the request + :param openstack_resource: instance of openstack resource (OpenstackServer) + """ + server_group_rel = \ + find_relationship_by_node_type(ctx.instance, SERVER_GROUP_NODE_TYPE) + + if server_group_rel: + server_group_id = \ + get_resource_id_from_runtime_properties(server_group_rel.target) + + scheduler_hints = \ + openstack_resource.config.get('scheduler_hints', {}) + scheduler_hints['group'] = server_group_id + openstack_resource.config['scheduler_hints'] = scheduler_hints + + +def _handle_generate_snapshot(server, + snapshot_name, + snapshot_type, + snapshot_rotation, + snapshot_incremental): + """ + This method will generate snapshot for server + :param server: instance of openstack resource (OpenstackServer) + :param str snapshot_name: Snapshot name + :param str snapshot_type: Snapshot type e.g (daily, weekly) + :param int snapshot_rotation: Snapshot rotation period + :param bool snapshot_incremental: Flag to create an incremental snapshots + or full backup + """ + + # # we save backupstate for get last state of creation + backup_done = ctx.instance.runtime_properties.get(SERVER_TASK_BACKUP_DONE) + if not backup_done: + if not snapshot_incremental: + server.backup(snapshot_name, snapshot_type, snapshot_rotation) + ctx.logger.info( + 'Server backup {0} creation started'.format(snapshot_name)) + else: + server.create_image(snapshot_name) + ctx.logger.info('Server snapshot {} creation started' + .format(snapshot_name)) + + # Set initial value for backup status + ctx.instance.runtime_properties[SERVER_TASK_BACKUP_DONE] \ + = SERVER_ACTION_STATUS_PENDING + + # Wait for finish upload + is_finished = \ + _check_finished_server_task(server, + [IMAGE_UPLOADING, + IMAGE_UPLOADING_PENDING]) + + if is_finished: + ctx.instance.runtime_properties[SERVER_TASK_BACKUP_DONE]\ + = SERVER_ACTION_STATUS_DONE + + +def _handle_snapshot_restore(server, image_id, snapshot_name): + """ + This method will handle the actual snapshot restore for certain image + :param server: instance of openstack server resource (OpenstackServer) + 
:param str image_id: Image id that should we restore from + :param str snapshot_name: Snapshot name + """ + # Get the restore state + restore_state =\ + ctx.instance.runtime_properties.get(SERVER_TASK_RESTORE_STATE) + + # Get the server status in order to decide to stop it or not + server_status = ctx.instance.runtime_properties.get(SERVER_TASK_STOP) + + # If restore is not set then we need to stop it and then try to rebuild + # the server after server stopped successfully + if not restore_state: + # Stop server before rebuild it + _stop_server(server) + + # Get the server status in order to decide to stop it or not + server_status = ctx.instance.runtime_properties.get(SERVER_TASK_STOP) + + # Only continue to next step if the server status is actually + # stopped, so that we can rebuild the server + if server_status == SERVER_ACTION_STATUS_DONE: + ctx.logger.info( + 'Rebuild {0} with {1}'.format( + server.resource_id, snapshot_name) + ) + + # Rebuild server after server stopped successfully + server.rebuild(image=image_id) + + # Set the initial status of restore state + ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE] \ + = SERVER_ACTION_STATUS_PENDING + + # Only check this logic if the server is already stopped + if server_status == SERVER_ACTION_STATUS_DONE: + # Check if the rebuild task is done or not + is_finished = \ + _check_finished_server_task(server, + [SERVER_REBUILD_SPAWNING_STATUS]) + + if is_finished: + ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_REBUILD_STATUS + + # Try to start server to be available for usage + _start_server(server) + + server_status = ctx.instance.runtime_properties[SERVER_TASK_START] + if server_status == SERVER_ACTION_STATUS_DONE: + ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_ACTION_STATUS_DONE + + +def _get_image(image_resource, snapshot_name): + """ + Get target image based on its name (snapshot name) + :param image_resource: instance of openstack image resource + (OpenstackImage) + :param str snapshot_name: The snapshot name + :return: instance of openstack image openstack.compute.v2.image.ImageDetail + """ + for image in image_resource.list(query={'name': snapshot_name}): + ctx.logger.info('Found image {0}'.format(repr(image))) + if image.name == snapshot_name: + return image + + return None + + +def _check_finished_server_task(server_resource, waiting_list): + """ + Check if the current server task is done or not + :param server_resource: instance of openstack server resource + (OpenstackServer) + :param waiting_list: list of status that should be checked on + :return: True if task is done, otherwise this should be retired again + """ + ctx.logger.info("Check server task state....") + + server = server_resource.get() + state = getattr(server, SERVER_TASK_STATE) + if state not in waiting_list: + return True + + return ctx.operation.retry( + message='Server has {0}/{1} state.' 
+ ''.format(server.status, state), retry_after=30) + + +def _get_bootable_indexed_volumes(mapping_devices): + """ + This method will retrieve all bootable devices from mapping device list + in order to determine which device will be marked as bootable to the server + :param list mapping_devices: List of `block_device_mapping_v2` object + which is part of server config object + :return: List of bootable indexed volumes + """ + bootable_devices = None + if mapping_devices: + # Get the bootable devices which have boot_index specified + bootable_devices = [ + device for device in mapping_devices if device.get('boot_index') + ] + # Check if there are bootable indexed volumes + if bootable_devices: + # Sort them in descending order in order to track the last index + # of bootable device + bootable_devices = sorted(bootable_devices, + reverse=True, + key=lambda x: x.get('boot_index')) + + return bootable_devices + + +def _get_boot_volume_targets(): + """ + This method will lookup all volume bootable targets associated with + servers + :return: This will return list of all target volume nodes associated + with server so that we can use them for define bootable devices + """ + targets = [] + for rel in ctx.instance.relationships: + # Get runtime properties for target instance + runtime_properties = rel.target.instance.runtime_properties + # Check if the target instance openstack type is volume type and it + # has bootable runtime property set on the target volume instance + if runtime_properties.get(OPENSTACK_TYPE_PROPERTY)\ + == VOLUME_OPENSTACK_TYPE \ + and runtime_properties.get(VOLUME_BOOTABLE): + + # Add target to the list + targets.append(rel.target) + + return targets + + +def _update_ports_config(server_config): + """ + This method will try to update server config with port configurations + using the relationships connected with server node + :param dict server_config: The server configuration required in order to + create the server instance using Openstack API + """ + # Check to see if the network dict is provided on the server config + # properties + networks = server_config.get('networks', []) + # Check if the server has already ports associated with its config so + # that we can merge the different ports together + server_ports = \ + [ + network[PORT_OPENSTACK_TYPE] + for network in networks if network.get(PORT_OPENSTACK_TYPE) + ] + + # Get the ports from relationships if they are existed + port_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, PORT_OPENSTACK_TYPE) + + ports_to_add = [] + + # if both are empty then server is not providing ports connection + # neither via node properties nor via relationships and this will be + # valid only one network created for the current tenant, the server will + # attach automatically to that network + if not (server_ports or port_ids): + return + # Try to merge them + elif port_ids and server_ports: + raise NonRecoverableError('Server can\'t both have the ' + '"networks" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + # Prepare the ports that should be added to the server networks + for port_id in port_ids: + ports_to_add.append({'port': port_id}) + + # If server is not associated with any networks then we need to create + # new networks object and attach port to it + if not networks: + server_config['networks'] = ports_to_add + # If server already has networks object then we should update it with + # the new ports that should be added to the server + elif networks and 
isinstance(networks, list) and ports_to_add: + server_config['networks'].extend(ports_to_add) + + +def _update_bootable_volume_config(server_config): + """ + This method will help to get volume info from relationship + :param server_config: The server configuration required in order to + create the server instance using Openstack API + """ + mapping_devices = server_config.get('block_device_mapping_v2', []) + + # Filter and get the uuids volume from block device mapping + volume_uuids = [ + device['uuid'] for device in mapping_devices if device.get('uuid') + ] + # Get the indexed bootable devices + bootable_devices = _get_bootable_indexed_volumes(mapping_devices) + + # Get the highest index from bootable devices + boot_index = \ + bootable_devices[0]['boot_index'] if bootable_devices else None + + bootable_rel_volumes = [] + bootable_rel_uuids = [] + # Get the targets volume connected to the server + volume_targets = _get_boot_volume_targets() + for volume_target in volume_targets: + resource_config = volume_target.node.properties.get('resource_config') + volume_uuid = volume_target.instance.runtime_properties[RESOURCE_ID] + + # boot_index could be 0 and we do not need to valuate it as false + # condition + if boot_index is None: + boot_index = 0 + else: + boot_index += 1 + volume_device = { + 'boot_index': boot_index, + 'uuid': volume_uuid, + 'volume_size': resource_config.get('size'), + 'device_name': volume_target.node.properties.get('device_name'), + 'source_type': 'volume', + 'delete_on_termination': False, + } + bootable_rel_volumes.append(volume_device) + bootable_rel_uuids.append(volume_uuid) + + # if both are empty then server is not providing volumes connection + # neither via node properties nor via relationships + if not (bootable_rel_uuids or volume_uuids): + return + # Try to merge them + elif bootable_rel_uuids and volume_uuids: + raise NonRecoverableError('Server can\'t both have the ' + '"block_device_mapping_v2" property and be ' + 'connected to a volume via a ' + 'relationship at the same time') + + elif bootable_rel_uuids and not volume_uuids: + mapping_devices = bootable_rel_volumes + + server_config['block_device_mapping_v2'] = mapping_devices + + +def _update_keypair_config(server_config): + """ + This method will try to get key pair info connected with server node if + there is any relationships + :param server_config: The server configuration required in order to + create the server instance using Openstack API + """ + # Get the key name from server if it exists + server_keyname = server_config.get('key_name') + + # Get the keyname from relationship if any + rel_keyname = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, KEYPAIR_OPENSTACK_TYPE) + # If server have two keyname from server node and from relationship then + # we should raise error + rel_keyname = rel_keyname[0] if rel_keyname else None + if server_keyname and rel_keyname: + raise NonRecoverableError('Server can\'t both have the ' + '"key_name" property and be ' + 'connected to a keypair via a ' + 'relationship at the same time') + + # At this point, only one of the keys will be set + key_name = server_keyname or rel_keyname + if key_name: + server_config['key_name'] = key_name + + +def _update_networks_config(server_config): + """ + This method will try to update server config with network configurations + using the relationships connected with server node + :param dict server_config: The server configuration required in order to + create the server instance using Openstack API + """ + + 
# Check to see if the network dict is provided on the server config + # properties + networks = server_config.get('networks', []) + # Check if the server has already networks associated with its config so + # that we can merge the different networks together + server_networks = \ + [ + network[OPENSTACK_RESOURCE_UUID] + for network in networks if network.get(OPENSTACK_RESOURCE_UUID) + ] + + # Get the networks from relationships if they are existed + network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + + networks_to_add = [] + + # if both are empty then server is not providing ports connection + # neither via node properties nor via relationships and this will be + # valid only one network created for the current tenant, the server will + # attach automatically to that network + if not (server_networks or network_ids): + return + # Try to merge them + elif server_networks and network_ids: + raise NonRecoverableError('Server can\'t both have the ' + '"networks" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + # Prepare the network uuids that should be added to the server networks + for net_id in network_ids: + networks_to_add.append({OPENSTACK_RESOURCE_UUID: net_id}) + + # If server is not associated with any networks then we need to create + # new networks object and attach network to it + if not networks: + server_config['networks'] = networks_to_add + # If server already has networks object then we should update it with + # the new networks that should be added to the server + elif networks and isinstance(networks, list) and networks_to_add: + server_config['networks'].extend(networks_to_add) + + +def _update_server_config(server_config): + """ + This method will try to resolve if there are any nodes connected to the + server node and try to use the configurations from nodes in order to + help create server using these configurations + :param dict server_config: The server configuration required in order to + create the server instance using Openstack API + """ + # Check if there are some ports configuration via relationships in order + # to update server config + _update_ports_config(server_config) + + # Check if there are some networks configuration in order to update + # server config + _update_networks_config(server_config) + + # Check if there are some bootable volumes via relationships in order + # update server config + _update_bootable_volume_config(server_config) + + # Check if there is a key pair connected to the server via relationship + # so that we can update server config when create server instance + _update_keypair_config(server_config) + + +def _validate_external_server_networks(openstack_resource, ports, networks): + """ + This method will validate if we can attach ports and networks to an + external server + :param openstack_resource: An instance of OpenstackServer + :param ports: List of ports uuid need to validate against them + :param networks: List of networks uuid need to validate against them + """ + interfaces = openstack_resource.server_interfaces() + attached_ports = \ + [ + network[OPENSTACK_PORT_ID] + for network in interfaces if network.get(OPENSTACK_PORT_ID) + ] + + attached_networks = \ + [ + network[OPENSTACK_NETWORK_ID] + for network in interfaces if network.get(OPENSTACK_NETWORK_ID) + ] + + common_ports = set(attached_ports) & set(ports) + common_networks = set(attached_networks) & set(networks) + if common_networks or common_ports: + raise NonRecoverableError( 
+ 'Several ports/networks already connected to external server ' + '{0}: Networks - {1}; Ports - {2}' + .format(openstack_resource.resource_id, + common_networks, + common_ports)) + + +def _connect_keypair_to_external_server(server): + """ + This method will validate if the connected keypair matches the + key already associated with the external server + :param server: An instance of OpenstackServer + """ + + # Get the keypair relationship connected to the server + keypair_rel = \ + find_relationship_by_node_type(ctx.instance, KEYPAIR_NODE_TYPE) + + # Prepare keypair instance + keypair_instance = OpenstackKeyPair(client_config=server.client_config, + logger=ctx.logger) + + if not keypair_rel: + return + # Get the keypair id from target relationship + keypair_id = get_resource_id_from_runtime_properties(keypair_rel.target) + # Get the node properties from target node + keypair_node_properties = keypair_rel.target.node.properties + # Raise NonRecoverableError if the keypair node is not an external + # resource + if not keypair_node_properties.get(USE_EXTERNAL_RESOURCE_PROPERTY): + raise NonRecoverableError( + 'Can\'t connect a new keypair node to a server node ' + 'with \'{0}\'=True'.format(USE_EXTERNAL_RESOURCE_PROPERTY)) + + keypair_instance.resource_id = keypair_id + keypair = keypair_instance.get() + if keypair_id != keypair.id: + raise NonRecoverableError( + 'Expected external resources server {0} and keypair {1} to be ' + 'connected'.format(server.id, keypair_id)) + + +def _connect_networks_to_external_server(openstack_resource): + """ + This method will try to connect networks to external server + :param openstack_resource: Instance of OpenstackServer in order to + use it + """ + # Prepare two lists for connected ports/networks in order to save them + # as runtime properties so that we can remove them from server when run + # stop operation + added_interfaces = [] + + # Get list of ports associated with the external server + ports = \ + find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, PORT_OPENSTACK_TYPE) + + # Get list of networks associated with the external server + networks = \ + find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + + # Validate if we can connect external server to the "ports" & "networks" + _validate_external_server_networks(openstack_resource, ports, networks) + + # If we reach here that means we can connect ports & networks to the + # external server and the validation passed successfully + for port_id in ports: + ctx.logger.info('Attaching port {0}...'.format(port_id)) + interface = \ + openstack_resource.create_server_interface({'port_id': port_id}) + ctx.logger.info( + 'Successfully attached port {0} to device (server) id {1}.' + .format(port_id, openstack_resource.resource_id)) + added_interfaces.append(interface.id) + + # Check again the server after attaching the ports so that we can do + # another check against the networks that are already attached + interfaces = openstack_resource.server_interfaces() + attached_networks = \ + [ + network[OPENSTACK_NETWORK_ID] + for network in interfaces if network.get(OPENSTACK_NETWORK_ID) + ] + + for net_id in networks: + if net_id not in attached_networks: + ctx.logger.info('Attaching network {0}...'.format(net_id)) + interface = \ + openstack_resource.create_server_interface({'net_id': net_id}) + ctx.logger.info( + 'Successfully attached network {0} to device (server) id {1}.'
+ .format(net_id, openstack_resource.resource_id)) + added_interfaces.append(interface.id) + else: + ctx.logger.info( + 'Skipping network {0} attachment, because it is already ' + 'attached to device (server) id {1}.' + .format(net_id, openstack_resource.resource_id)) + + # Check if there are interfaces added to the external server and add + # them as runtime properties + if added_interfaces: + ctx.instance.runtime_properties[SERVER_INTERFACE_IDS] =\ + added_interfaces + + # Set runtime properties for external server + server = openstack_resource.get() + _set_server_ips_runtime_properties(server) + + +def _connect_resources_to_external_server(openstack_resource): + """ + This method will try to connect resources to external server + :param openstack_resource: Instance Of OpenstackServer in order to + use it + """ + + # Try to connect networks to external server + _connect_networks_to_external_server(openstack_resource) + + # Validate external key pair connected to the external server + _connect_keypair_to_external_server(openstack_resource) + + # Assign payload to server + remote_server = openstack_resource.get() + assign_resource_payload_as_runtime_properties(ctx, + remote_server, + SERVER_OPENSTACK_TYPE) + + +def _disconnect_resources_from_external_server(openstack_resource): + """ + This method will disconnect networks from external server so that they + can be removed without any issue + :param openstack_resource: Instance Of OpenstackServer in order to + use it + """ + # Delete all interfaces added to the external server + if SERVER_INTERFACE_IDS in ctx.instance.runtime_properties: + interfaces = ctx.instance.runtime_properties.get( + SERVER_INTERFACE_IDS, []) + for interface in interfaces: + openstack_resource.delete_server_interface(interface) + ctx.logger.info( + 'Successfully detached network {0} to device (server) id {1}.' 
+ .format(interface, openstack_resource.resource_id)) + + +def _get_server_private_key(): + """ + This method will check if the server is connected to a keypair + so that we can get the private key content and use it to decrypt the + password generated when creating the server + :return (str) private_key: Private key content + """ + # Get the keyname from relationship if any + rel_keyname = \ + find_relationship_by_node_type(ctx.instance, KEYPAIR_NODE_TYPE) + if not rel_keyname: + return None + + # Try to get the private key from keypair instance + private_key = \ + rel_keyname.target.instance.runtime_properties.get('private_key') + if not private_key: + return None + return private_key + + +def _decrypt_password(password, private_key): + """ + This method will decrypt the user password for the server so that it can + be used later on + :param (str) password: Encrypted password + :param (str) private_key: Private key + :return (str) password: Return decrypted password + """ + # Check if both password and private key are provided + if not (password and private_key): + raise NonRecoverableError('Password and private key must' + ' be both provided for password decryption') + + # Define variable to hold decrypted password + decrypted_password = '' + + # Import the private key so that we can use it to decrypt password + rsa_key = RSA.importKey(private_key) + rsa_key = PKCS1_v1_5.new(rsa_key) + + # Decode the base64-encoded password + encrypted_password = base64.b64decode(password) + + # Do the decryption process in chunks + chunk_size = 512 + offset = 0 + + # keep loop going as long as we have chunks to decrypt + while offset < len(encrypted_password): + # The encrypted password chunk + chunk_data = encrypted_password[offset: offset + chunk_size] + + # Append the decrypted password chunk to the overall + # decrypted password + error_decrypt = 'Error while trying to decrypt password' + decrypted_password += rsa_key.decrypt(chunk_data, error_decrypt) + + # Increase the offset by chunk size + offset += chunk_size + + return decrypted_password + + +def _get_user_password(openstack_resource): + """ + This method will get the server password as encrypted for the current + server + :param openstack_resource: Instance of OpenstackServer in order to + use it + """ + if ctx.node.properties.get('use_password'): + # The current openstack sdk does not allow to send private key path + # when trying to lookup the password which means the password + # generated will be encrypted + res = openstack_resource.get_server_password() + password = json.loads(res.content) if res.content else None + password = password['password'] if password.get('password') else None + # If the password is not available yet, then retry + if not password: + raise OperationRetry( + message='Waiting for server to post generated password') + else: + # The password is encrypted; decrypt it using the private key + private_key = _get_server_private_key() + password = _decrypt_password(password, private_key) + ctx.instance.runtime_properties[SERVER_ADMIN_PASSWORD] = password + ctx.logger.info('Server has been set with a password') + + +def _disconnect_security_group_from_server_ports(client_config, + server_payload, + security_group_id): + """ + This method will help to remove the connection between a port and a + security group, because when we attach a security group to a server that + has multiple ports connected to it, all the ports are automatically going + to connect to the security group + :param dict client_config: Openstack configuration required to connect + to API
+ :param dict server_payload: Server payload configuration from openstack + :param str security_group_id: Security group ID + """ + if not server_payload: + return + + networks = server_payload.get('networks', []) + server_ports = \ + [ + network[PORT_OPENSTACK_TYPE] + for network in networks if network.get(PORT_OPENSTACK_TYPE) + ] + for port_id in server_ports: + port = OpenstackPort(client_config=client_config, + logger=ctx.logger) + port.resource_id = port_id + remote_port = port.get() + port_security_groups = remote_port.security_group_ids + if security_group_id in remote_port.security_group_ids: + port_security_groups.remove(security_group_id) + + port.update({ + 'security_groups': port_security_groups + }) + + +@with_openstack_resource( + OpenstackServer, + existing_resource_handler=_connect_resources_to_external_server) +def create(openstack_resource): + """ + Create openstack server instance + :param openstack_resource: instance of openstack server resource + """ + blueprint_user_data = openstack_resource.config.get('user_data') + user_data = handle_userdata(blueprint_user_data) + + # Handle user data + if user_data: + openstack_resource.config['user_data'] = user_data + + # Update server config by depending on relationships + _update_server_config(openstack_resource.config) + + # Handle server group + _handle_server_group(openstack_resource) + + # Create resource + created_resource = openstack_resource.create() + + # Set the "id" as a runtime property for the created server + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + # Update the resource_id with the new "id" returned from API + openstack_resource.resource_id = created_resource.id + + # Assign runtime properties for server + assign_resource_payload_as_runtime_properties(ctx, + created_resource, + SERVER_OPENSTACK_TYPE) + + +@with_openstack_resource(OpenstackServer) +def configure(openstack_resource): + """ + Populate required runtime properties for server when it is in active status + :param openstack_resource: instance of openstack server resource + """ + # Get the details for the created servers instance + server = openstack_resource.get() + + # Get the server status + status = server.status + if status == SERVER_STATUS_ACTIVE: + ctx.logger.info('Server {0} is already started'.format(server.id)) + _set_server_ips_runtime_properties(server) + _get_user_password(openstack_resource) + return + elif status == SERVER_STATUS_ERROR: + raise NonRecoverableError( + 'Server {0} cannot be started, ' + 'because it is on error state'.format(server.id)) + else: + raise OperationRetry( + message='Waiting for server to be in {0} state but is in {1} ' + 'state. 
Retrying...'.format(SERVER_STATUS_ACTIVE, status)) + + +@with_openstack_resource(OpenstackServer) +def delete(openstack_resource): + """ + Delete current openstack server + :param openstack_resource: instance of openstack server resource + """ + # Get the details for the created server instance + try: + server = openstack_resource.get() + except exceptions.ResourceNotFound: + msg = 'Server {0} is not found'.format(openstack_resource.resource_id) + if SERVER_TASK_DELETE not in ctx.instance.runtime_properties: + ctx.logger.error(msg) + raise NonRecoverableError(msg) + + ctx.logger.info('Server {0} is deleted successfully' + .format(openstack_resource.resource_id)) + return + + # Check if the delete operation was triggered before or not + if SERVER_TASK_DELETE not in ctx.instance.runtime_properties: + openstack_resource.delete() + ctx.instance.runtime_properties[SERVER_TASK_DELETE] = True + + ctx.logger.info('Waiting for server "{0}" to be deleted.' + ' Current status: {1}'.format(server.id, server.status)) + + raise OperationRetry(message='Server has {0} state.'.format(server.status)) + + +@with_openstack_resource( + OpenstackServer, + existing_resource_handler=_disconnect_resources_from_external_server) +def stop(openstack_resource): + """ + Stop current openstack server + :param openstack_resource: instance of openstack server resource + """ + + # Clean any interfaces connected to the server + for interface in openstack_resource.server_interfaces(): + openstack_resource.delete_server_interface(interface.id) + ctx.logger.info('Successfully detached network' + ' {0} from device (server) id {1}.' + .format(interface, openstack_resource.resource_id)) + + # Stop server instance + _stop_server(openstack_resource) + + +@with_openstack_resource(OpenstackServer) +def reboot(openstack_resource, reboot_type='soft'): + """ + This operation task is to reboot the current instance of the server + :param openstack_resource: instance of openstack server resource + :param str reboot_type: The type of reboot to perform. + "HARD" and "SOFT" are the current options. + """ + if ctx.operation.retry_number == 0: + if reboot_type.upper() not in [SERVER_REBOOT_HARD, SERVER_REBOOT_SOFT]: + raise NonRecoverableError( + 'Unexpected reboot type: {}. ' + 'Valid values: SOFT or HARD.'.format(reboot_type)) + openstack_resource.reboot(reboot_type.upper()) + + # Get the details for the rebooted server instance + server = openstack_resource.get() + + if server.status in [SERVER_STATUS_REBOOT, + SERVER_STATUS_HARD_REBOOT, + SERVER_STATUS_UNKNOWN]: + return ctx.operation.retry( + message="Server has {0} state. 
Waiting.".format(server.status), + retry_after=30) + + elif server.status == SERVER_STATUS_ACTIVE: + ctx.logger.info( + 'Reboot operation finished in {} state.'.format(server.status)) + + elif server.status == SERVER_STATUS_ERROR: + raise NonRecoverableError( + 'Reboot operation finished in {} state.'.format( + server.status)) + + else: + raise NonRecoverableError( + 'Reboot operation finished in unexpected state: {}'.format( + server.state)) + + +@with_openstack_resource(OpenstackServer) +def suspend(openstack_resource): + """ + Suspend server + :param openstack_resource: instance of openstack server resource + """ + ctx.logger.info('Suspend VM {}'.format(openstack_resource.resource_id)) + openstack_resource.suspend() + + +@with_openstack_resource(OpenstackServer) +def resume(openstack_resource): + """ + Resume server + :param openstack_resource: instance of openstack server resource + """ + ctx.logger.info('Resume VM {}'.format(openstack_resource.resource_id)) + openstack_resource.resume() + + +@with_openstack_resource(OpenstackServer) +def snapshot_create(openstack_resource, **kwargs): + """ + Create server backup. + :param kwargs: snapshot information provided by workflow + :param openstack_resource: instance of openstack server resource + """ + ctx.logger.info('Create snapshot for {0}'.format( + openstack_resource.resource_id)) + + # Get snapshot information provided by workflow parameters + snapshot_name = kwargs.get('snapshot_name') + snapshot_rotation = None + if kwargs.get('snapshot_rotation'): + snapshot_rotation = int(kwargs['snapshot_rotation']) + + snapshot_type = kwargs.get('snapshot_type') + snapshot_incremental = kwargs.get('snapshot_incremental') + + # Generate snapshot name + snapshot_name = \ + get_snapshot_name('vm', snapshot_name, snapshot_incremental) + + # Create an instance if openstack image in order to check if the image + # already exists or not + image_resource = OpenstackImage( + client_config=openstack_resource.client_config, + logger=ctx.logger + ) + + # Try to lookup the image from openstack + retry_number = ctx.operation.retry_number + target_image = _get_image(image_resource, snapshot_name) + + # If retry_number == 0 and image exists then we should raise error, + # otherwise if retry_number exceeds 0 then that means the image is still + # uploading + if retry_number == 0 and target_image: + raise NonRecoverableError( + 'Snapshot {} already exists.'.format(snapshot_name)) + + # Handle snapshot here + _handle_generate_snapshot(openstack_resource, + snapshot_name, + snapshot_type, + snapshot_rotation, + snapshot_incremental) + + +@with_openstack_resource(OpenstackServer) +def snapshot_apply(openstack_resource, **kwargs): + """ + Restore server from backup | snapshot. 
+ :param kwargs: snapshot information provided by workflow + :param openstack_resource: instance of openstack server resource + """ + snapshot_name = kwargs.get('snapshot_name') + snapshot_incremental = kwargs.get('snapshot_incremental') + + # Get the generated snapshot name + snapshot_name = \ + get_snapshot_name('vm', snapshot_name, snapshot_incremental) + + _log_snapshot_message(openstack_resource.resource_id, + snapshot_name, + snapshot_incremental) + + # Create an instance if openstack image in order to check if the image + # already exists or not + image_resource = OpenstackImage( + client_config=openstack_resource.client_config, + logger=ctx.logger + ) + + # Check if the image need to be restored is existed + target_image = _get_image(image_resource, snapshot_name) + if not target_image: + raise NonRecoverableError( + 'No snapshot found with name: {0}'.format(snapshot_name)) + + _handle_snapshot_restore(openstack_resource, + target_image.id, + snapshot_name) + + +@with_openstack_resource(OpenstackServer) +def snapshot_delete(openstack_resource, **kwargs): + """ + Delete server backup | snapshot. + :param kwargs: snapshot information provided by workflow + :param openstack_resource: instance of openstack server resource + """ + snapshot_name = kwargs.get('snapshot_name') + snapshot_incremental = kwargs.get('snapshot_incremental') + + # Get the generated snapshot name + snapshot_name = \ + get_snapshot_name('vm', snapshot_name, snapshot_incremental) + + # log the message for snapshot operation + _log_snapshot_message(openstack_resource.resource_id, + snapshot_name, + snapshot_incremental) + + # Create an instance if openstack image in order delete uploaded image + image_resource = OpenstackImage( + client_config=openstack_resource.client_config, + logger=ctx.logger + ) + + # Check if the image need to be deleted is existed + target_image = _get_image(image_resource, snapshot_name) + if not target_image: + raise NonRecoverableError( + 'No snapshot found with name: {0}'.format(snapshot_name)) + + if target_image.status == IMAGE_STATUS_ACTIVE: + image_resource.resource_id = target_image.id + image_resource.delete() + + # Check if the image need to be deleted is existed + target_image = _get_image(image_resource, snapshot_name) + if target_image: + return ctx.operation.retry( + message='{} is still alive' + ''.format(target_image.id), retry_after=30) + else: + # If image is remove then we need to reset the following + # runtime properties: + # - backup_done + # - restore_state + # - stop_server_task + # - start_server_task + + # The reason for reset the above runtime properties is because of + # the user want to start over again after running delete snapshot + # operation # "cloudify.interfaces.snapshot.delete" + for attr in [SERVER_TASK_BACKUP_DONE, + SERVER_TASK_RESTORE_STATE, + SERVER_TASK_STOP, + SERVER_TASK_START]: + + if attr in ctx.instance.runtime_properties: + del ctx.instance.runtime_properties[attr] + + +@with_openstack_resource(OpenstackServer) +def attach_volume(openstack_resource, **kwargs): + """ + This method will attach a volume to server + :param openstack_resource: instance of openstack server resource + :param kwargs: Additional information that could be provided via + operation task inputs + """ + # Get volume id from source instance + volume_id = get_resource_id_from_runtime_properties(ctx.source) + # Get the device property from volume node + device = ctx.source.node.properties[VOLUME_DEVICE_NAME_PROPERTY] + + # Prepare volume attachment config required for adding 
attaching volume + # to certain server + attachment_config = { + 'volume_id': volume_id, + 'device': device if device != 'auto' else None + } + + server_node_id = ctx.target.instance.id + volume_node_id = ctx.source.instance.id + attachment_task_key = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_TASK, + volume_node_id, + server_node_id) + + attachment_volume_id_key = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_ID, + volume_node_id, + server_node_id) + # Create volume attachment + if attachment_task_key not in ctx.target.instance.runtime_properties: + attachment = \ + openstack_resource.create_volume_attachment(attachment_config) + ctx.target.instance.runtime_properties[attachment_task_key] = True + ctx.target.instance.runtime_properties[attachment_volume_id_key] = \ + attachment.id + + # Prepare volume instance in order to check the current status of the + # volume being attached to the server + volume_instance = OpenstackVolume( + client_config=openstack_resource.client_config, + logger=ctx.logger) + volume_instance.resource_id = volume_id + + # Wait until final status of the attached volume becomes in-use so that + # we can tell that the volume attachment is ready to use by the server + volume = wait_until_status(volume_instance, + VOLUME_OPENSTACK_TYPE, + VOLUME_STATUS_IN_USE, + VOLUME_ERROR_STATUSES) + # If the volume is ready, that means we do not need to keep the task + # status anymore + if volume: + del ctx.target.instance.runtime_properties[attachment_task_key] + + +@with_openstack_resource(OpenstackServer) +def detach_volume(openstack_resource, **kwargs): + """ + This method will detach a volume to server + :param openstack_resource: instance of openstack server resource + :param kwargs: Additional information that could be provided via + operation task inputs + """ + # Get volume id from source instance + volume_id = get_resource_id_from_runtime_properties(ctx.source) + + # Get the ids for node, in order to generate the attachment volume key + server_node_id = ctx.target.instance.id + volume_node_id = ctx.source.instance.id + + # Attachment volume key + attachment_volume_id_key = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_ID, + volume_node_id, + server_node_id) + + # Try to lookup the attachment volume id + attachment_volume_id = \ + ctx.target.instance.runtime_properties.get(attachment_volume_id_key) + if not attachment_volume_id: + raise NonRecoverableError( + 'Attachment volume id {0} is missing' + ' from runtime properties '.format(attachment_volume_id_key)) + + # Detachment volume task key + detachment_task_key = \ + generate_attachment_volume_key(VOLUME_DETACHMENT_TASK, + volume_node_id, + server_node_id) + + # Detach volume from server + if detachment_task_key not in ctx.target.instance.runtime_properties: + openstack_resource.delete_volume_attachment(attachment_volume_id) + ctx.target.instance.runtime_properties[detachment_task_key] = True + + # Prepare volume instance in order to check the current status of the + # volume being attached to the server + volume_instance = OpenstackVolume( + client_config=openstack_resource.client_config, + logger=ctx.logger) + volume_instance.resource_id = volume_id + + # Wait until final status of the attached volume becomes in-use so that + # we can tell that the volume attachment is ready to use by the server + volume = wait_until_status(volume_instance, + VOLUME_OPENSTACK_TYPE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES) + + # If the volume is available, that means we do not need to keep the task + # 
status anymore + if volume: + del ctx.target.instance.runtime_properties[detachment_task_key] + + +@with_openstack_resource(OpenstackServer) +def connect_floating_ip(openstack_resource, floating_ip, fixed_ip=''): + """ + This method will connect floating ip to server + :param openstack_resource: Instance of openstack server resource + :param str floating_ip: The floating IP + :param str fixed_ip: The fixed IP address to be associated with the + floating IP address. Used when the server is connected to multiple + networks. + """ + if not floating_ip: + raise NonRecoverableError('floating_ip is required in order to ' + 'connect floating ip to server {0}' + ''.format(openstack_resource.resource_id)) + + fixed_ip = fixed_ip or None + openstack_resource.add_floating_ip_to_server(floating_ip, + fixed_ip=fixed_ip) + + +@with_openstack_resource(OpenstackServer) +def disconnect_floating_ip(openstack_resource, floating_ip): + """ + This will disconnect floating ip address from server + :param openstack_resource: Instance of openstack server resource + :param floating_ip: The floating IP connetced to the server which should + be disconnected + """ + if not floating_ip: + raise NonRecoverableError('floating_ip is required in order to ' + 'disconnect floating ip from server {0}' + ''.format(openstack_resource.resource_id)) + + openstack_resource.remove_floating_ip_from_server(floating_ip) + + +@with_openstack_resource(OpenstackServer) +def connect_security_group(openstack_resource, security_group_id): + """ + This method will connect security group to server + :param openstack_resource: Instance of openstack server resource + :param str security_group_id: The ID of a security group + """ + if not security_group_id: + raise NonRecoverableError('security_group_id is required in order to ' + 'connect security group to server {0}' + ''.format(openstack_resource.resource_id)) + + openstack_resource.add_security_group_to_server(security_group_id) + + +@with_openstack_resource(OpenstackServer) +def disconnect_security_group(openstack_resource, security_group_id): + """ + This will disconnect floating ip address from server + :param openstack_resource: Instance of openstack server resource + :param security_group_id: The ID of a security group + """ + if not security_group_id: + raise NonRecoverableError('security_group_id is required in order to ' + 'disconnect security group from server {0}' + ''.format(openstack_resource.resource_id)) + + openstack_resource.remove_security_group_from_server(security_group_id) + + # Get the payload for server from runtime properties in order to get the + # ports information attached to the server which will automatically + # reference the disconnected security group which will cause an issue + # when trying to delete security group, so we should break the + # connection between the ports attached to the server and the security + # group + server_payload = \ + ctx.source.instance.runtime_properties.get(SERVER_OPENSTACK_TYPE) + if server_payload: + _disconnect_security_group_from_server_ports( + openstack_resource.client_config, + server_payload, + security_group_id + ) + + +@with_openstack_resource(OpenstackServer) +def update(openstack_resource, args): + """ + Update openstack server by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack server resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + updated_server = openstack_resource.update(args) + # Update 
the runtime properties for the updated server + assign_resource_payload_as_runtime_properties(ctx, + updated_server, + SERVER_OPENSTACK_TYPE) + + +@with_openstack_resource(OpenstackServer) +def list_servers(openstack_resource, + query=None, + all_projects=False, + details=True): + """ + List openstack servers based on filters applied + :param openstack_resource: Instance of current openstack server + :param kwargs query: Optional query parameters to be sent to limit + the servers being returned. + :param bool all_projects: Flag to request servers be returned from all + projects, not just the currently scoped one. + :param bool details: When set to ``False`` + :class:`~openstack.compute.v2.server.Server` instances + will be returned. The default, ``True``, will cause + :class:`~openstack.compute.v2.server.ServerDetail` + instances to be returned. + """ + servers = openstack_resource.list(details, all_projects, query) + add_resource_list_to_runtime_properties(SERVER_OPENSTACK_TYPE, servers) + + +@with_openstack_resource(OpenstackServer) +def creation_validation(openstack_resource): + """ + This method is to check if we can create server resource in openstack + :param openstack_resource: Instance of current openstack server + """ + validate_resource_quota(openstack_resource, INSTANCE_OPENSTACK_TYPE) + ctx.logger.debug('OK: server configuration is valid') diff --git a/openstack_plugin/resources/compute/server_group.py b/openstack_plugin/resources/compute/server_group.py new file mode 100644 index 00000000..89c0b434 --- /dev/null +++ b/openstack_plugin/resources/compute/server_group.py @@ -0,0 +1,81 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
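# Illustrative sketch only (not part of the plugin's own code): roughly how the
# kind of query accepted by the `list_servers` operation above would look when
# sent directly through openstacksdk's compute proxy. The filter names used
# here ('status', 'name') are common Nova list filters chosen for the example.
from openstack import connect


def list_active_servers_sketch(client_config, name_filter='web'):
    conn = connect(**client_config)
    # details=True mirrors the default used by the operation above
    return list(conn.compute.servers(details=True,
                                     status='ACTIVE',
                                     name=name_filter))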
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.compute import OpenstackServerGroup +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + SERVER_GROUP_OPENSTACK_TYPE) + +from openstack_plugin.utils import (validate_resource_quota, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackServerGroup) +def create(openstack_resource): + """ + Create openstack server group resource + :param openstack_resource: Instance of openstack server group resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackServerGroup) +def delete(openstack_resource): + """ + Delete current openstack server group + :param openstack_resource: instance of openstack server group resource + """ + # Delete the server group resource after lookup the resource_id values + openstack_resource.delete() + + +@with_openstack_resource(OpenstackServerGroup) +def update(openstack_resource, args): + """ + Update openstack server group by passing args dict that contains the info + that need to be updated + :param openstack_resource: instance of openstack server group resource + :param args: dict of information need to be updated + """ + # Update server group not support right now with openstack + raise NonRecoverableError( + 'openstack library does not support update server group') + + +@with_openstack_resource(OpenstackServerGroup) +def list_server_groups(openstack_resource, query=None): + """ + List openstack server groups + :param openstack_resource: Instance of openstack sever group. + """ + server_groups = openstack_resource.list(query) + add_resource_list_to_runtime_properties(SERVER_GROUP_OPENSTACK_TYPE, + server_groups) + + +@with_openstack_resource(OpenstackServerGroup) +def creation_validation(openstack_resource): + """ + This method is to check if we can create server group resource in openstack + :param openstack_resource: Instance of current openstack server group + """ + validate_resource_quota(openstack_resource, SERVER_GROUP_OPENSTACK_TYPE) + ctx.logger.debug('OK: server group configuration is valid') diff --git a/docs/_static/.gitkeep b/openstack_plugin/resources/identity/__init__.py similarity index 100% rename from docs/_static/.gitkeep rename to openstack_plugin/resources/identity/__init__.py diff --git a/openstack_plugin/resources/identity/project.py b/openstack_plugin/resources/identity/project.py new file mode 100644 index 00000000..e9d61c84 --- /dev/null +++ b/openstack_plugin/resources/identity/project.py @@ -0,0 +1,219 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
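# A simplified sketch of the `with_openstack_resource` pattern used by every
# operation in these modules. This is NOT the plugin's actual decorator (which
# also wires resource_config, resource ids and external-resource handling);
# it only shows the general shape: build the SDK wrapper from node properties
# and hand it to the task as `openstack_resource`.
import functools

from cloudify import ctx


def with_resource_sketch(resource_class):
    def decorator(task):
        @functools.wraps(task)
        def wrapper(**kwargs):
            resource = resource_class(
                client_config=ctx.node.properties.get('client_config'),
                logger=ctx.logger)
            return task(openstack_resource=resource, **kwargs)
        return wrapper
    return decorator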
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.identity import (OpenstackProject, + OpenstackUser, + OpenstackRole) +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + PROJECT_OPENSTACK_TYPE, + IDENTITY_USERS, + IDENTITY_ROLES, + IDENTITY_QUOTA) +from openstack_plugin.utils import (validate_resource_quota, + reset_dict_empty_keys, + add_resource_list_to_runtime_properties) + + +def _assign_users(project_resource, users): + """ + Assign users to project + :param project_resource: project resource instance (OpenstackProject) + :param users: List of users that need to be assigned to project with roles + """ + # Create user resource to be able to get info about user + user_resource = OpenstackUser( + client_config=project_resource.client_config, + logger=ctx.logger + ) + + # Create user role resource to be able to get info about role + role_resource = OpenstackRole( + client_config=project_resource.client_config, + logger=ctx.logger + ) + + for user in users: + user_roles = user.get(IDENTITY_ROLES, []) + user_item = user_resource.find_user(user.get('name')) + if not user_item: + raise NonRecoverableError('User {0} is not found' + ''.format(user['name'])) + + for role in user_roles: + user_role = role_resource.find_role(role) + if not user_role: + raise NonRecoverableError('Role {0} is not found'.format(role)) + + # Assign project role to user + role_resource.assign_project_role_to_user( + project_id=project_resource.resource_id, + user_id=user_item.id, + role_id=user_role.id) + + ctx.logger.info( + 'Assigned user {0} to project {1} with role {2}'.format( + user_item.id, project_resource.resource_id, user_role.id)) + + +def _validate_users(client_config, users): + """ + This method will validate if the users are already exists before doing + any role assignment. 
Morever, it will check if the roles also exist or not + :param list users: List of users (dict) that contains user names and + roles associated + :param client_config: Openstack configuration in order to connect to + openstack + """ + + # Create user resource to be able to get info about user + user_resource = OpenstackUser(client_config=client_config, + logger=ctx.logger) + + # Create user role resource to be able to get info about role + role_resource = OpenstackRole(client_config=client_config, + logger=ctx.logger) + + user_names = [user.get('name') for user in users] + if len(user_names) > len(set(user_names)): + raise NonRecoverableError(' Provided users are not unique') + + for user_name in user_names: + user = user_resource.find_user(user_name) + if not user: + raise NonRecoverableError( + 'User {0} is not found'.format(user_name)) + + for user in users: + if len(user[IDENTITY_ROLES]) > len(set(user[IDENTITY_ROLES])): + msg = 'Roles for user {0} are not unique' + raise NonRecoverableError(msg.format(user.get('name'))) + + role_names = {role for user in users for role in user.get(IDENTITY_ROLES)} + for role_name in role_names: + user_role = role_resource.find_role(role_name) + if not user_role: + raise NonRecoverableError( + 'Role {0} is not found'.format(role_name)) + + +@with_openstack_resource(OpenstackProject) +def create(openstack_resource): + """ + Create openstack project resource + :param openstack_resource: Instance of openstack project resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackProject) +def start(openstack_resource): + """ + Prepare users to be added to created project + :param openstack_resource: Instance of openstack project resource + """ + + # Check if project node has associated users that should be added + if ctx.node.properties.get(IDENTITY_USERS): + + # Before start assigning roles user, there is a validation that must be + # run first to check if the the provided users and their roles are + # already exist + users = ctx.node.properties[IDENTITY_USERS] + _validate_users(openstack_resource.client_config, users) + + # Assign project role to users + _assign_users(openstack_resource, users) + + # Check if project node has quota information that should be updated for + # project + # TODO The openstack should be extended in order to add support for + # quota update + if ctx.node.properties.get(IDENTITY_QUOTA): + raise NonRecoverableError('Openstack SDK does not support updating ' + 'quota for project') + + +@with_openstack_resource(OpenstackProject) +def delete(openstack_resource): + """ + Delete current openstack project + :param openstack_resource: instance of openstack project resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackProject) +def update(openstack_resource, args): + """ + Update openstack project by passing args dict that contains the info + that need to be updated + :param openstack_resource: instance of openstack project resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackProject) +def list_projects(openstack_resource, query=None): + """ + List openstack projects + :param openstack_resource: Instance of openstack project. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
+ """ + projects = openstack_resource.list(query) + add_resource_list_to_runtime_properties(PROJECT_OPENSTACK_TYPE, projects) + + +@with_openstack_resource(OpenstackProject) +def creation_validation(openstack_resource): + """ + This method is to check if we can create project resource in openstack + :param openstack_resource: Instance of current openstack project + """ + validate_resource_quota(openstack_resource, PROJECT_OPENSTACK_TYPE) + ctx.logger.debug('OK: project configuration is valid') + + +@with_openstack_resource(OpenstackProject) +def get_project_quota(openstack_resource): + """ + This method is to get quota for project resource in openstack + :param openstack_resource: Instance of current openstack project + """ + # TODO The openstack should be extended in order to add support for + # retrieving quota for project + raise NonRecoverableError('Openstack SDK does not support retrieving ' + 'quota for project') + + +@with_openstack_resource(OpenstackProject) +def update_project_quota(openstack_resource): + """ + This method is to update quota project resource in openstack + :param openstack_resource: Instance of current openstack project + """ + # TODO The openstack should be extended in order to add support for + # get update + raise NonRecoverableError('Openstack SDK does not support updating ' + 'quota for project') diff --git a/openstack_plugin/resources/identity/user.py b/openstack_plugin/resources/identity/user.py new file mode 100644 index 00000000..464eabd8 --- /dev/null +++ b/openstack_plugin/resources/identity/user.py @@ -0,0 +1,67 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.identity import OpenstackUser +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, USER_OPENSTACK_TYPE) +from openstack_plugin.utils import (reset_dict_empty_keys, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackUser) +def create(openstack_resource): + """ + Create openstack user resource + :param openstack_resource: Instance of openstack user resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackUser) +def delete(openstack_resource): + """ + Delete current openstack user + :param openstack_resource: instance of openstack user resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackUser) +def update(openstack_resource, args): + """ + Update openstack user by passing args dict that contains the info + that need to be updated + :param openstack_resource: instance of openstack user resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackUser) +def list_users(openstack_resource, query=None): + """ + List openstack users + :param openstack_resource: Instance of openstack user. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + """ + users = openstack_resource.list(query) + add_resource_list_to_runtime_properties(USER_OPENSTACK_TYPE, users) diff --git a/manager_tests/test_openstack.py b/openstack_plugin/resources/network/__init__.py similarity index 100% rename from manager_tests/test_openstack.py rename to openstack_plugin/resources/network/__init__.py diff --git a/openstack_plugin/resources/network/floating_ip.py b/openstack_plugin/resources/network/floating_ip.py new file mode 100644 index 00000000..1f26d148 --- /dev/null +++ b/openstack_plugin/resources/network/floating_ip.py @@ -0,0 +1,263 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
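# The `update` operation above passes its args through `reset_dict_empty_keys`
# first. A plausible, simplified reading of that helper (the real one lives in
# openstack_plugin.utils and may differ) is that empty strings are turned into
# None so the API treats them as "unset" rather than as literal empty values.
def reset_dict_empty_keys_sketch(config):
    return {key: (value if value != '' else None)
            for key, value in (config or {}).items()}


# e.g. {'name': 'demo', 'description': ''} -> {'name': 'demo', 'description': None}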
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import (RecoverableError, NonRecoverableError) + +# Local imports +from openstack_sdk.resources.networks import (OpenstackFloatingIP, + OpenstackNetwork) +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + FLOATING_IP_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + SUBNET_OPENSTACK_TYPE, + PORT_OPENSTACK_TYPE) +from openstack_plugin.utils import ( + reset_dict_empty_keys, + validate_resource_quota, + add_resource_list_to_runtime_properties, + find_openstack_ids_of_connected_nodes_by_openstack_type) + + +def use_external_floating_ip(openstack_resource): + """ + This method will allow floating ip reallocation whenever + use_external_resource is ste to "True" and "allow_reallocation" is enabled + :param openstack_resource: Instance Of OpenstackFloatingIP in order to + use it + """ + + remote_resource = openstack_resource.get() + status = remote_resource.status + floating_ip = remote_resource.floating_ip_address + if not ctx.node.properties['allow_reallocation'] and status == 'ACTIVE': + raise RecoverableError( + 'Floating IP address {0} is already associated'.format(floating_ip) + ) + # Set the floating ip address as runtime property if "allow_reallocation" + # is set to "True" + ctx.instance.runtime_properties['floating_ip_address'] = floating_ip + + +def _get_floating_network_id_from_relationship(resource_type): + """ + This method will find if floating ip node is connected to the following + resource types: + - Port + - Network + - Subnet + Using relationship and will raise error if it is connected to + multiple resources + :param str resource_type: Instance of openstack floating ip + resource + :return str floating_network_id: Floating network id + """ + # Get the network id from relationship if it is existed + resource_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, resource_type) + # Check if floating ip is connected to multiple resources + if len(resource_ids) > 1: + raise NonRecoverableError( + 'Cannot attach floating ip to multiple ' + '{0}s {1}'.format(','.join(resource_ids), resource_type)) + + return resource_ids[0] if resource_ids else None + + +def _update_floating_ip_port(floating_ip_resource): + """ + This method will try to update floating ip config with port + configurations using the relationships connected with floating ip node + :param dict floating_ip_resource: Instance of openstack floating ip + resource + """ + + # Check to see if the floating port id is provided on the floating ip + # config properties + floating_ip_config = floating_ip_resource.config + port_id = floating_ip_config.get('port_id') + + # Get the floating port id from relationship if it is existed + rel_port_id = \ + _get_floating_network_id_from_relationship(PORT_OPENSTACK_TYPE) + if port_id and rel_port_id: + raise NonRecoverableError('Floating IP can\'t both have the ' + '"port_id" property and be ' + 'connected to a port via a ' + 'relationship at the same time') + + if port_id or rel_port_id: + floating_ip_config['port_id'] = rel_port_id or port_id + + +def _update_floating_ip_subnet(floating_ip_resource): + """ + This method will try to update floating ip config with subnet + configurations using the relationships connected with floating ip node + :param dict floating_ip_resource: Instance of openstack floating ip + resource + """ + # Check to see if the floating port id is provided on the floating ip + # config properties + 
floating_ip_config = floating_ip_resource.config + subnet_id = floating_ip_config.get('subnet_id') + + # Get the floating subnet id from relationship if it is existed + rel_subnet_id = \ + _get_floating_network_id_from_relationship(SUBNET_OPENSTACK_TYPE) + if subnet_id and rel_subnet_id: + raise NonRecoverableError('Floating IP can\'t both have the ' + '"subnet_id" property and be ' + 'connected to a subnet via a ' + 'relationship at the same time') + + if subnet_id or rel_subnet_id: + floating_ip_config['subnet_id'] = rel_subnet_id or subnet_id + + +def _update_floating_ip_network(floating_ip_resource): + """ + This method will try to update floating ip config with network + configurations using the relationships connected with floating ip node + :param dict floating_ip_resource: Instance of openstack floating ip + resource + """ + + # Check to see if the floating network id is provided on the floating ip + # config properties + floating_ip_config = floating_ip_resource.config + floating_network_id = floating_ip_config.get('floating_network_id') + + # Check if floating_network_name is provided + floating_network_name = floating_ip_config.get('floating_network_name') + + # Get the floating network id from relationship if it is existed + rel_floating_network_id = \ + _get_floating_network_id_from_relationship(NETWORK_OPENSTACK_TYPE) + + if floating_network_id and floating_network_name \ + or (floating_network_id and rel_floating_network_id)\ + or (floating_network_name and rel_floating_network_id): + raise NonRecoverableError('Floating ip can\'t have the ' + '"floating network properties and be ' + 'connected to a network via a ' + 'relationship at the same time') + + # Check if floating network name is provided or not + if floating_network_name: + # Create network instance to get the network id + network = OpenstackNetwork( + client_config=floating_ip_resource.client_config, + logger=ctx.logger) + # Set the network name provided in "resource_config" + network.name = floating_network_name + # Lookup remote network + remote_network = network.find_network() + if not remote_network: + raise NonRecoverableError('Floating IP network {0} not found' + ''.format(floating_network_name)) + # Set "floating_network_id" to the remote network id + floating_network_id = remote_network.id + # Clean "floating_network_name" from floating_ip_config since it is + # not part of the payload request for creating floating ip + del floating_ip_config['floating_network_name'] + + # Set the final "floating_network_id" value based on the computation above + floating_ip_config['floating_network_id'] = \ + floating_network_id or rel_floating_network_id + + +def _update_floating_ip_config(floating_ip_resource): + """ + This method will try to update floating ip config with network | subnet + | port configurations using the relationships connected with floating ip + node + :param dict floating_ip_resource: Instance of openstack floating ip + resource + """ + + # Update floating ip network + _update_floating_ip_network(floating_ip_resource) + # Update floating ip port + _update_floating_ip_port(floating_ip_resource) + # Update floating ip subnet + _update_floating_ip_subnet(floating_ip_resource) + + +@with_openstack_resource(class_decl=OpenstackFloatingIP, + existing_resource_handler=use_external_floating_ip) +def create(openstack_resource): + """ + Create openstack floating ip instance + :param openstack_resource: Instance of openstack floating ip resource + """ + # Update floating ip config + 
_update_floating_ip_config(openstack_resource) + # Create openstack resource + created_resource = openstack_resource.create() + # Update runtime properties for floating ip + ctx.instance.runtime_properties[RESOURCE_ID] = \ + created_resource.id + ctx.instance.runtime_properties['floating_ip_address'] = \ + created_resource.floating_ip_address + + +@with_openstack_resource(OpenstackFloatingIP) +def delete(openstack_resource): + """ + Delete current openstack floating ip + :param openstack_resource: Instance of openstack floating ip resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackFloatingIP) +def update(openstack_resource, args): + """ + Update openstack floating ip by passing args dict that contains the info + that need to be updated + :param openstack_resource: instance of openstack floating ip resource + :param args: dict of information need to be updated + """ + # At some case like remove ip from port, openstack API refuse to to set + # port_id to '' empty string in order to delete the port, it should be + # set to None in order to set it, so it is required to change '' to None + new_config = reset_dict_empty_keys(args) + openstack_resource.update(new_config) + + +@with_openstack_resource(OpenstackFloatingIP) +def list_floating_ips(openstack_resource, query=None): + """ + List openstack floating ips based on filters applied + :param openstack_resource: Instance of current openstack floating ip + :param kwargs query: Optional query parameters to be sent to limit + the floating ips being returned. + """ + floating_ips = openstack_resource.list(query) + add_resource_list_to_runtime_properties( + FLOATING_IP_OPENSTACK_TYPE, floating_ips) + + +@with_openstack_resource(OpenstackFloatingIP) +def creation_validation(openstack_resource): + """ + This method is to check if we can create floating ip resource in openstack + :param openstack_resource: Instance of current openstack floating ip + """ + validate_resource_quota(openstack_resource, FLOATING_IP_OPENSTACK_TYPE) + ctx.logger.debug('OK: floating ip configuration is valid') diff --git a/openstack_plugin/resources/network/network.py b/openstack_plugin/resources/network/network.py new file mode 100644 index 00000000..02109e92 --- /dev/null +++ b/openstack_plugin/resources/network/network.py @@ -0,0 +1,79 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
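# The floating-network resolution above rejects any case where two or more of
# the three possible sources (the `floating_network_id` property, the
# `floating_network_name` property, or a connected network relationship) are
# supplied at once. The same rule, condensed into a standalone check with
# example values:
def floating_network_sources_conflict(network_id, network_name, rel_network_id):
    provided = [value for value in (network_id, network_name, rel_network_id)
                if value]
    return len(provided) > 1


assert floating_network_sources_conflict('net-1', None, 'net-2') is True
assert floating_network_sources_conflict(None, 'public', None) is False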
+ +# Third part imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.networks import OpenstackNetwork +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import RESOURCE_ID +from openstack_plugin.utils import (validate_resource_quota, + reset_dict_empty_keys, + add_resource_list_to_runtime_properties) +from openstack_plugin.constants import NETWORK_OPENSTACK_TYPE + + +@with_openstack_resource(OpenstackNetwork) +def create(openstack_resource): + """ + Create openstack network instance + :param openstack_resource: instance of openstack network resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackNetwork) +def delete(openstack_resource): + """ + Delete current openstack network + :param openstack_resource: instance of openstack network resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackNetwork) +def update(openstack_resource, args): + """ + Update openstack network by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack network resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackNetwork) +def list_networks(openstack_resource, query=None): + """ + List openstack networks based on filters applied + :param openstack_resource: Instance of current openstack network + :param kwargs query: Optional query parameters to be sent to limit + the networks being returned. + """ + networks = openstack_resource.list(query) + add_resource_list_to_runtime_properties(NETWORK_OPENSTACK_TYPE, networks) + + +@with_openstack_resource(OpenstackNetwork) +def creation_validation(openstack_resource): + """ + This method is to check if we can create network resource in openstack + :param openstack_resource: Instance of current openstack network + """ + validate_resource_quota(openstack_resource, NETWORK_OPENSTACK_TYPE) + ctx.logger.debug('OK: network configuration is valid') diff --git a/openstack_plugin/resources/network/port.py b/openstack_plugin/resources/network/port.py new file mode 100644 index 00000000..4948eae8 --- /dev/null +++ b/openstack_plugin/resources/network/port.py @@ -0,0 +1,276 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
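# `list_networks` above hands the result to
# `add_resource_list_to_runtime_properties`. A rough sketch of what such a
# helper is for (the real implementation in openstack_plugin.utils may use a
# different key or shape): expose the listed resources on the node instance so
# workflows and other nodes can read them. The '<type>_list' key below is an
# assumption made for this example only.
from cloudify import ctx


def store_resource_list_sketch(openstack_type, resources):
    ctx.instance.runtime_properties['{0}_list'.format(openstack_type)] = [
        # openstacksdk resource objects expose their fields via to_dict()
        resource.to_dict() for resource in resources
    ]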
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.networks import OpenstackPort +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + PORT_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + SECURITY_GROUP_OPENSTACK_TYPE) +from openstack_plugin.utils import ( + update_runtime_properties, + reset_dict_empty_keys, + validate_resource_quota, + add_resource_list_to_runtime_properties, + find_openstack_ids_of_connected_nodes_by_openstack_type +) + + +def _update_network_config(port_config): + """ + This method will try to update oprt config with network configurations + using the relationships connected with port node + :param port_config: The port configuration required in order to + create the port instance using Openstack API + """ + # Get network id from port config + network_id = port_config.get('network_id') + + # Get the network id from relationship if any + rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + + rel_network_id = rel_network_ids[0] if rel_network_ids else None + # Check if network config comes from two sources or not + if network_id and rel_network_id: + raise NonRecoverableError('Port can\'t both have the ' + '"network_id" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + port_config['network_id'] = network_id or rel_network_id + + +def _update_security_groups_config(port_config): + """ + This method will try to update oprt config with securit groups + configurations using the relationships connected with port node + :param port_config: The port configuration required in order to + create the port instance using Openstack API + """ + + # Get security groups from port config + security_groups = port_config.get('security_groups') + + # Get the security groups from relationship if any + rel_security_groups = \ + find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, SECURITY_GROUP_OPENSTACK_TYPE) + + # Check if network config comes from two sources or not + if rel_security_groups and security_groups: + raise NonRecoverableError('Port can\'t both have the ' + '"security_groups" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + port_config['security_groups'] = security_groups or rel_security_groups + + +def _update_port_config(port_config): + """ + This method will try to resolve if there are any nodes connected to the + port node and try to update the configurations from nodes in order to + help create port from configurations + :param port_config: The port configuration required in order to + create the port instance using Openstack API + """ + + # Update network config for port node + _update_network_config(port_config) + + # Update security groups config for port node + _update_security_groups_config(port_config) + + +def _update_external_port(openstack_resource): + """ + This method will update external port by attaching new ips to external + port + :param openstack_resource: Instance Of OpenstackPort in order to + use it + """ + # Get the external port using the resource id provided via port node + external_port = openstack_resource.get() + # Check if the current port node has allowed_address_pairs as part of + # resource_config + addresses_to_add = openstack_resource.config.get('allowed_address_pairs') + if addresses_to_add: + old_addresses = 
external_port.get('allowed_address_pairs') or [] + + # Get the old ips from the each pair + old_ips = \ + [ + old_address['ip_address'] + for old_address + in old_addresses if old_address.get('ip_address') + ] + # Get the ips need to be added to the external port + ips_to_add = \ + [ + address_to_add['ip_address'] + for address_to_add + in addresses_to_add if address_to_add.get('ip_address') + ] + + # Check if there are a common ips between old ips and the one we + # should add via node + common_ips = set(old_ips) & set(ips_to_add) + if common_ips: + raise NonRecoverableError( + 'Ips {0} are already assigned to {1}' + ''.format(common_ips, external_port.id)) + + # Update port for allowed paris + updated_port = openstack_resource.update( + {'allowed_address_pairs': addresses_to_add}) + # Update runtime properties + update_runtime_properties( + { + 'fixed_ips': updated_port.fixed_ips, + 'mac_address': updated_port.mac_address, + 'allowed_address_pairs': updated_port.allowed_address_pairs, + } + ) + + # Get the networks from relationships if they are existed + rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + + rel_network_id = rel_network_ids[0] if rel_network_ids else None + if rel_network_id: + port = openstack_resource.get() + if port['network_id'] != rel_network_id: + raise NonRecoverableError( + 'Expected external resources port {0} and network {1} ' + 'to be connected'.format(port.id, rel_network_id)) + + +def _clean_addresses_from_external_port(openstack_resource): + """ + This method will clean all updated addresses added to the external port + while the port node created at install workflow + :param openstack_resource: + """ + # Get the external port using the resource id provided via port node + external_port = openstack_resource.get() + # Check if the current port node has allowed_address_pairs as part of + # resource_config + addresses_to_remove = openstack_resource.config.get( + 'allowed_address_pairs') + + if addresses_to_remove: + remote_addresses = external_port.allowed_address_pairs or [] + # Get the remote ips from the each pair + remote_ips = \ + [ + remote_address['ip_address'] + for remote_address + in remote_addresses if remote_address.get('ip_address') + ] + + # Get the ips need to be removed to the external port + ips_to_remove = \ + [ + address_to_remove['ip_address'] + for address_to_remove + in addresses_to_remove if address_to_remove.get('ip_address') + ] + + # Check if there are a common ips between old ips and the one we + # should remove via node + diff_ips = set(remote_ips) - set(ips_to_remove) + diff_ips = list(diff_ips) if diff_ips else [] + updated_pairs = [] + for ip_address in diff_ips: + updated_pairs.append({'ip_address': ip_address}) + + # Update port for allowed paris + openstack_resource.update({'allowed_address_pairs': updated_pairs}) + + +@with_openstack_resource( + OpenstackPort, + existing_resource_handler=_update_external_port) +def create(openstack_resource): + """ + Create openstack port instance + :param openstack_resource: instance of openstack port resource + """ + # Update port config before create port + _update_port_config(openstack_resource.config) + + # Create port + created_resource = openstack_resource.create() + + # Handle runtime properties + update_runtime_properties( + { + RESOURCE_ID: created_resource.id, + 'fixed_ips': created_resource.fixed_ips, + 'mac_address': created_resource.mac_address, + 'allowed_address_pairs': created_resource.allowed_address_pairs, + } + ) + + 
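# Worked example of the set arithmetic used above for an external port's
# allowed_address_pairs (addresses are illustrative). On create, any overlap
# between the pairs already on the port and the node's pairs is an error; on
# delete, only the addresses that were not contributed by the node are kept.
old_ips = {'10.0.0.5', '10.0.0.6'}
ips_to_add = {'10.0.0.6', '10.0.0.7'}

common_ips = old_ips & ips_to_add   # {'10.0.0.6'} -> would raise NonRecoverableError
remaining = old_ips - ips_to_add    # {'10.0.0.5'} -> pairs left on the external port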
+@with_openstack_resource( + OpenstackPort, + existing_resource_handler=_clean_addresses_from_external_port) +def delete(openstack_resource): + """ + Delete current openstack port + :param openstack_resource: instance of openstack port resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackPort) +def update(openstack_resource, args): + """ + Update openstack port by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack port resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackPort) +def list_ports(openstack_resource, query=None): + """ + List openstack ports based on filters applied + :param openstack_resource: Instance of current openstack port + :param kwargs query: Optional query parameters to be sent to limit + the ports being returned. + """ + ports = openstack_resource.list(query) + add_resource_list_to_runtime_properties(PORT_OPENSTACK_TYPE, ports) + + +@with_openstack_resource(OpenstackPort) +def creation_validation(openstack_resource): + """ + This method is to check if we can create port resource in openstack + :param openstack_resource: Instance of current openstack port + """ + validate_resource_quota(openstack_resource, PORT_OPENSTACK_TYPE) + ctx.logger.debug('OK: port configuration is valid') diff --git a/openstack_plugin/resources/network/rbac_policy.py b/openstack_plugin/resources/network/rbac_policy.py new file mode 100644 index 00000000..723c8aa7 --- /dev/null +++ b/openstack_plugin/resources/network/rbac_policy.py @@ -0,0 +1,449 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
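# Conceptual sketch of how connected resource ids are resolved from
# relationships by a helper like
# find_openstack_ids_of_connected_nodes_by_openstack_type (used above to pull
# network and security-group ids). The real helper is in
# openstack_plugin.utils; the 'type' / 'id' runtime-property keys are assumed
# to be what OPENSTACK_TYPE_PROPERTY / RESOURCE_ID resolve to.
def connected_ids_sketch(ctx, openstack_type):
    ids = []
    for rel in ctx.instance.relationships:
        props = rel.target.instance.runtime_properties
        if props.get('type') == openstack_type:
            ids.append(props.get('id'))
    return ids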
+ +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.networks import (OpenstackRBACPolicy, + OpenstackNetwork, + OpenstackSubnet, + OpenstackPort) +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_TYPE_PROPERTY, + NETWORK_OPENSTACK_TYPE, + RBAC_POLICY_OPENSTACK_TYPE, + RBAC_POLICY_RELATIONSHIP_TYPE, + QOS_POLICY_OPENSTACK_TYPE) + +from openstack_plugin.utils import (reset_dict_empty_keys, + merge_resource_config, + validate_resource_quota, + add_resource_list_to_runtime_properties, + find_relationships_by_relationship_type) + + +def _get_rbac_policy_target_from_relationship(): + """ + Lookup target object that should apply rbac policy for and return it as + the following format + { + 'object_id': '9a332608-af04-4368-b696-3726a54f2a66' + 'object_type': 'network' + } + :return: Object info that contains details about object type & id + """ + + # Lookup the rbac policy relationship so that we can get the info that + # we need to create rbac policy and apply it for target object + rels = \ + find_relationships_by_relationship_type( + ctx, RBAC_POLICY_RELATIONSHIP_TYPE + ) + + # It could be no relationship find for the current node context which + # means that the node is not associated with any other node + if len(rels) == 0: + ctx.logger.info( + 'Resource for which RBAC policy may be applied ' + 'not found using {0} relationship' + .format(RBAC_POLICY_RELATIONSHIP_TYPE) + ) + + return {} + + # Since rbac policy allow only to be applied to one object at a time + # then we cannot define link rbac policy node with multiple nodes via + # "cloudify.relationships.openstack.rbac_policy_applied_to" + elif len(rels) > 1: + raise NonRecoverableError( + 'Multiple ({0}) resources for which RBAC policy may be applied ' + 'found using relationship {1}' + .format( + len(rels), + RBAC_POLICY_RELATIONSHIP_TYPE + ) + ) + + # Lookup the target instance in order to get the target object + # runtime properties which represent "type" & "id" + resource = rels[0].target.instance + ctx.logger.info( + '{0} resource for which RBAC policy may be applied ' + 'found using {1} relationship)' + .format(resource, RBAC_POLICY_RELATIONSHIP_TYPE) + ) + + # Get the instance runtime properties for both "id" & "type" + resource_id = resource.runtime_properties.get(RESOURCE_ID) + resource_type = resource.runtime_properties.get(OPENSTACK_TYPE_PROPERTY) + + # If we cannot find these attributes then we can skip that and depend on + # the rbac policy to resolve "object_type" & "object_id" + if not resource_id or not resource_type: + ctx.logger.warn( + 'Found using relationship resource has not defined either ' + '"id" or "type" runtime_property. Skipping.' 
+ ) + + return {} + + # Return the object info needed to be wrapped into API request when + # create rbac request + return { + 'object_type': resource_type, + 'object_id': resource_id + } + + +def _validate_config_for_applied_rbac_resource(input_dict, target_object): + """ + Validate that resource does not contain multiple definitions for rbac + policy that allow user to specify them using properties, operation + inputs and relationship + :param dict input_dict: Target object config provided via properties or + operation inputs + :param dict target_object: Target object config provided via relationship + """ + if target_object: + for key in target_object.keys(): + if input_dict and input_dict.get(key): + raise NonRecoverableError( + 'Multiple definitions of resource for which ' + 'RBAC policy should be applied. ' + 'You specified it both using properties / operation ' + 'inputs and relationship.' + ) + + +def _get_rbac_policy_target_object(openstack_resource, args): + """ + Lookup the target object that need to apply rbac policy for + :param openstack_resource: instance of openstack rbac policy resource + :param dict args: RBAC policy configuration provided via task inputs + :return dict: Object info that contains details about object type & id + { + 'object_id': '9a332608-af04-4368-b696-3726a54f2a66' + 'object_type': 'network' + + } + """ + # Try to lookup the object_type & object_id from relationships first + object_info = _get_rbac_policy_target_from_relationship() + + # Validate the config rbac resources + if object_info: + for config in [openstack_resource.config, args]: + _validate_config_for_applied_rbac_resource(config, object_info) + + return object_info + + +def _prepare_rbac_policy_object(openstack_resource, args): + + """ + Prepare and generate rbac policy which will be used to create RBAC policy + This method mainly will do the following: + + 1 - Try to lookup target object via realtionship in order to apply rbac + policy + 2 - Merge provided config args with rbac policy node properties + + :param openstack_resource: instance of openstack rbac policy resource + :param dict args: RBAC policy configuration provided via task inputs + """ + + # Try to lookup if there is any target object that should be apply rabc on + target_object = _get_rbac_policy_target_object(openstack_resource, args) + if target_object: + openstack_resource.config['object_id'] = target_object['object_id'] + openstack_resource.config['object_type'] = target_object['object_type'] + + # If there is no target object (No relationship exists) then we need to + # check if the current node config contains all the info needed for + # target object + else: + object_id = openstack_resource.cofig.get('object_id') + object_type = openstack_resource.cofig.get('object_type') + if not (object_id and object_type): + raise NonRecoverableError( + 'Both object_id & object_type should be provided in order' + ' to create rbac policy' + ) + + # Check to see if there are some configuration provided via operation + # input so that we can merge them with volume config + merge_resource_config(openstack_resource.config, args) + + +def _disable_dhcp_for_subnets(client_config, resource_id): + """ + Disable dhcp for subnets associated with network so that rbac policy can + be removed + :param client_config: Openstack config required to make API calls + :param resource_id: resource_id: Resource id of the target object + """ + + network = OpenstackNetwork(client_config, logger=ctx.logger) + network.resource_id = resource_id + network_item = 
network.get()
+    # Disable the dhcp option for all subnets attached to the
+    # current network
+    for subnet_id in network_item.subnet_ids:
+        subnet = OpenstackSubnet(client_config, logger=ctx.logger)
+        subnet.resource_id = subnet_id
+        subnet_item = subnet.get()
+        # Disable dhcp for the subnet if it is already enabled, since it
+        # would prevent the rbac policy from being deleted
+        if subnet_item.is_dhcp_enabled:
+            subnet.update(new_config={'enable_dhcp': False})
+
+
+def _clean_ports_from_network(client_config, resource_id):
+    """
+    Unset & clean ports associated with the network
+    :param client_config: Openstack config required to make API calls
+    :param resource_id: Resource id of the target object
+    """
+    # The network could have ports of types other than "network:dhcp"
+    # that must be deleted before the rbac policy can be removed. This is
+    # controlled by "clean_ports" because ports created from cloudify
+    # blueprints are removed automatically when the uninstall workflow is
+    # triggered, while ports created outside cloudify need to be removed
+    # here
+    port = OpenstackPort(client_config, logger=ctx.logger)
+    for port_item in port.list(query={'network_id': resource_id}):
+        port.resource_id = port_item.id
+        port.update(new_config={'device_id': 'none'})
+        port.delete()
+
+
+def _clean_resources_from_target_object(client_config,
+                                        resource_id,
+                                        resource_type,
+                                        disable_dhcp=False,
+                                        clean_ports=False):
+    """
+    This method cleans ports and disables dhcp for subnets before the rbac
+    policy is deleted, since the rbac policy cannot be removed while they
+    still exist
+    :param dict client_config: Openstack config required to make API calls
+    :param str resource_id: Resource id of the target object
+    :param str resource_type: Resource type of the target object (network)
+    :param bool disable_dhcp: Flag to allow disabling dhcp for subnets
+    :param bool clean_ports: Flag to allow unsetting & cleaning ports
+    """
+
+    # The type of the object that the RBAC policy affects can be either
+    # qos-policy or network.
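+    # Illustrative example (assumed values, not taken from the plugin
+    # docs): for a shared network that still has a DHCP-enabled subnet
+    # and a stray port, a call such as
+    #     _clean_resources_from_target_object(client_config,
+    #                                         'net-uuid',
+    #                                         'network',
+    #                                         disable_dhcp=True,
+    #                                         clean_ports=True)
+    # first disables dhcp on every subnet of 'net-uuid' and then unsets
+    # and deletes its remaining ports, after which the rbac policy can be
+    # deleted.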
+ if resource_type == NETWORK_OPENSTACK_TYPE: + if disable_dhcp: + _disable_dhcp_for_subnets(client_config, resource_id) + + if clean_ports: + _clean_ports_from_network(client_config, resource_id) + + elif resource_type == QOS_POLICY_OPENSTACK_TYPE: + # TODO since qos-policy not support right now, this should be added + # later on + pass + + +@with_openstack_resource(OpenstackRBACPolicy) +def create(openstack_resource, args): + """ + Create openstack rbac policy instance + :param openstack_resource: instance of openstack rbac policy resource + """ + _prepare_rbac_policy_object(openstack_resource, args) + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackRBACPolicy) +def delete(openstack_resource): + """ + Delete current openstack rbac policy instance + :param openstack_resource: instance of openstack srbac policy resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackRBACPolicy) +def update(openstack_resource, args): + """ + Update openstack rbac policy by passing args dict that contains the info + that need to be updated + :param openstack_resource: instance of openstack rbac policy resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackRBACPolicy) +def list_rbac_policies(openstack_resource, query=None): + """ + List openstack rbac policies based on filters applied + :param openstack_resource: Instance of current openstack rbac policy + :param kwargs query: Optional query parameters to be sent to limit + the rbac policies being returned. + """ + + rbac_policies = openstack_resource.list(query) + add_resource_list_to_runtime_properties(RBAC_POLICY_OPENSTACK_TYPE, + rbac_policies) + + +@with_openstack_resource(OpenstackRBACPolicy) +def find_and_delete(openstack_resource, + args, + disable_dhcp=False, + clean_ports=False): + """ + This method will help to find rbac policy object and delete it. + By Default "disable_dhcp" & "clean_ports" are set to False and they can + be enabled in order to help clean ports and disable dhcp. 
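+
+    Illustrative "args" payload (example values only; these keys mirror
+    the rbac policy config that the matching logic below relies on):
+
+        {
+            'action': 'access_as_shared',
+            'target_tenant': 'demo-project-id',
+            'object_type': 'network',
+            'object_id': 'net-uuid'
+        }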
+ + :param openstack_resource: Instance of current openstack rbac policy + :param dict args: RBAC policy object config + :param bool disable_dhcp: Flag to allow disable dhcp for subnets + :param bool clean_ports: Flag to allow unset & clear ports + """ + + _prepare_rbac_policy_object(openstack_resource, args) + rbac_policy_config = openstack_resource.config + + # Since "id" will be set as part of the current node instance, we need + # to remove it from the config since this operation main job is to find + # rbac policy based on the configuration provided by operation task and + # then remove it + rbac_policy_config.pop('id', None) + rbac_policies = openstack_resource.list() + + for rbac_policy in rbac_policies: + # In order to find the rbac policy we need to filter the rbac policy + # based on the following params + # - object_type + # - object_id + # - action + # - target_tenant + + # However, the response return from API server for listing rbac + # policies return the following params + # - id + # - project_id + # - action + # - location + # - object_id + # - object_type + # - name + # - target_project_id + + # We care only about these config + # - object_type + # - object_id + # - action + # - target_project_id + + # We need to do a mapping between "target_project_id" and + # "target_tenant" to do the comparison + def _parse_item(item): + return (item[0], item[1]) if item[0] != 'target_project_id'\ + else ('target_tenant', item[1]) + + rbac_policy = dict(map(_parse_item, rbac_policy.iteritems())) + if all(item in rbac_policy.items() + for item in rbac_policy_config.items()): + + # Found the target object which should be deleted + ctx.logger.info( + 'Found RBAC policy with ID: {0} - deleting ...' + ''.format(rbac_policy['id']) + ) + + # Call clean method + _clean_resources_from_target_object( + openstack_resource.client_config, + rbac_policy['object_id'], + NETWORK_OPENSTACK_TYPE, + disable_dhcp, + clean_ports + ) + # We need to delete the matched object + openstack_resource.resource_id = rbac_policy['id'] + openstack_resource.delete() + return + + ctx.logger.warn('No suitable RBAC policy found') + + +@with_openstack_resource(OpenstackRBACPolicy) +def creation_validation(openstack_resource): + """ + This method is to check if we can create rbac policy resource in openstack + :param openstack_resource: Instance of current openstack rbac policy + """ + validate_resource_quota(openstack_resource, RBAC_POLICY_OPENSTACK_TYPE) + ctx.logger.debug('OK: rbac policy configuration is valid') + + +@with_openstack_resource(OpenstackRBACPolicy) +def unlink_target_object(openstack_resource, + resource_id, + disable_dhcp=False, + clean_ports=False): + """ + This task method is to clean resources associated with resource which + are required to be removed before rbac policy get removed and this task + would be useful on the following cases : + + 1 - if resource type associated with rbac policy is network and has + subnets with dhcp enabled, then in order to remove the rbac policy it + is required to disable dhcb by passing "disable_dhcp" as "True" + + 2 - If resource type associated with rbac policy is network and has + ports that created outside cloudify, then we should unset these ports + and delete them by passing "clean_ports" as "True" + + + 3 - If resource type associated with rbac policy is network and has both + subnets with dhcp enabled & ports that created outside cloudify + then we should disable dhcb and clean ports by passing + "disable_dhcp" as "True" & "clean_ports" as "True" + + 4 - If 
resource type associated with rbac policy is network and has + ports that created inside cloudify, then we should not allow clean + and unset ports since these are going to be deleted automatically as + part of the install workflow, so based on that "clean_ports" should + be passed as "False" + + :param openstack_resource: Instance of current openstack rbac policy + :param str resource_id: Resource id of the target object (network) + :param bool disable_dhcp: Flag to allow disable dhcp for subnets + :param bool clean_ports: Flag to allow unset & clear ports + """ + resource_type = \ + ctx.target.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] + + _clean_resources_from_target_object( + openstack_resource.client_config, + resource_id, + resource_type, + disable_dhcp, + clean_ports + ) diff --git a/openstack_plugin/resources/network/router.py b/openstack_plugin/resources/network/router.py new file mode 100644 index 00000000..d4a40808 --- /dev/null +++ b/openstack_plugin/resources/network/router.py @@ -0,0 +1,258 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.networks import (OpenstackRouter, + OpenstackNetwork) +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + ROUTER_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE) +from openstack_plugin.utils import ( + reset_dict_empty_keys, + validate_resource_quota, + add_resource_list_to_runtime_properties, + find_openstack_ids_of_connected_nodes_by_openstack_type) + + +def _get_external_network_id(ext_gateway_info): + """ + This method will lookup the external network id from external gateway + info object + :param dict ext_gateway_info: External info dict + :return str: External network id + """ + if ext_gateway_info and ext_gateway_info.get('network_id'): + return ext_gateway_info['network_id'] + return None + + +def _get_connected_external_network_from_relationship(network_resource): + """ + This method will lookup external network connected to network using + relationship + :param network_resource: Instance of openstack network resource + :return str: External network id + """ + # Get networks connected to router + networks = \ + find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, + NETWORK_OPENSTACK_TYPE) + # List to save all external networks connected to router + external_network_ids = [] + + for net_id in networks: + network_resource.resource_id = net_id + remote_network = network_resource.get() + if remote_network.is_router_external: + external_network_ids.append(net_id) + + if len(external_network_ids) > 1: + raise NonRecoverableError( + 'More than one external network is connected to router {0}' + ' by a relationship; External network IDs: {0}'.format( + external_network_ids)) + + return external_network_ids[0] if external_network_ids 
else None + + +def _connect_router_to_external_network(router_resource): + """ + This method will update router config with external network by checking + if it is provided using node property "resource_config" or via + relationship and we should only connect router to external network from + one source + :param router_resource: Instance of openstack router resource + """ + if not router_resource or router_resource and not router_resource.config: + return + + network_resource = \ + OpenstackNetwork(client_config=router_resource.client_config, + logger=ctx.logger) + # Get network id from "resource_config" which represent "router_config" + ext_net_id = \ + _get_external_network_id( + router_resource.config.get('external_gateway_info')) + + # Get network id id from relationship connected to router + rel_ext_net_id = \ + _get_connected_external_network_from_relationship(network_resource) + + if ext_net_id and rel_ext_net_id: + raise NonRecoverableError('Router can\'t both have the ' + '"external_gateway_info" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + if 'external_gateway_info' not in router_resource.config: + router_resource.config['external_gateway_info'] = {} + + router_resource.config['external_gateway_info']['network_id'] = \ + ext_net_id or rel_ext_net_id + + +def _handle_external_router_resource(openstack_resource): + """ + This method is to do a validation for external router resource when it + is connected to external network node resource + :param openstack_resource: Instance of openstack router resource + """ + remote_router = openstack_resource.get() + network_resource = \ + OpenstackNetwork(client_config=openstack_resource.client_config, + logger=ctx.logger) + rel_network_id = \ + _get_connected_external_network_from_relationship(network_resource) + ext_network_id = \ + _get_external_network_id(remote_router.external_gateway_info) + if rel_network_id and ext_network_id != rel_network_id: + raise NonRecoverableError( + 'Expected external resources subnet {0} and network' + ' {1} to be connected'.format(rel_network_id, ext_network_id)) + + +@with_openstack_resource( + OpenstackRouter, + existing_resource_handler=_handle_external_router_resource) +def create(openstack_resource): + """ + Create openstack router instance + :param openstack_resource: Instance of openstack router resource + """ + # Update router with the correct external network, so that they can be + # connected to each other successfully + _connect_router_to_external_network(openstack_resource) + + # Create router + created_resource = openstack_resource.create() + + # Save router resource id as runtime property + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackRouter) +def delete(openstack_resource): + """ + Delete current openstack router + :param openstack_resource: instance of openstack router resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackRouter) +def update(openstack_resource, args): + """ + Update openstack router by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack router resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackRouter) +def list_routers(openstack_resource, query=None): + """ + List openstack routers based on filters applied + :param openstack_resource: Instance of 
current openstack router + :param kwargs query: Optional query parameters to be sent to limit + the routers being returned. + """ + routers = openstack_resource.list(query) + add_resource_list_to_runtime_properties(ROUTER_OPENSTACK_TYPE, routers) + + +@with_openstack_resource(OpenstackRouter) +def creation_validation(openstack_resource): + """ + This method is to check if we can create router resource in openstack + :param openstack_resource: Instance of current openstack router + """ + validate_resource_quota(openstack_resource, ROUTER_OPENSTACK_TYPE) + ctx.logger.debug('OK: router configuration is valid') + + +@with_openstack_resource(OpenstackRouter) +def add_interface_to_router(openstack_resource, **kwargs): + """ + Add interface to router in order to link router with other services like + (port, subnet) + :param openstack_resource: instance of openstack router resource + :param kwargs: Configuration must be provided in order to connect with + router and these configuration are subnet_id, port_id + """ + openstack_resource.add_interface(kwargs) + + +@with_openstack_resource(OpenstackRouter) +def remove_interface_from_router(openstack_resource, **kwargs): + """ + Remove interface to router in order to unlink router with other services + like (port, subnet) + :param openstack_resource: instance of openstack router resource + :param kwargs: Configuration must be provided in order to connect with + router and these configuration are subnet_id, port_id + """ + openstack_resource.remove_interface(kwargs) + + +@with_openstack_resource(OpenstackRouter) +def start(openstack_resource, **kwargs): + """ + Add static routes for router + :param openstack_resource: instance of openstack router resource + :param kwargs: Routes configuration which should be added to router table + """ + if kwargs and kwargs.get('routes'): + # Store routes in order to use them later on in order to remove them + # when the stop operation for router trigger + ctx.instance.runtime_properties['routes'] = kwargs['routes'] + routes = dict() + routes['routes'] = kwargs['routes'] + openstack_resource.update(routes) + + +@with_openstack_resource(OpenstackRouter) +def stop(openstack_resource): + """ + Remove static routes which added before for router + :param openstack_resource: instance of openstack router resource + """ + if 'routes' in ctx.instance.runtime_properties: + # There are some routes need to be deleted since it is part of the + # runtime properties + + # Routes need to be removed + routes_to_delete = ctx.instance.runtime_properties['routes'] + + # Get the remote router info + router = openstack_resource.get() + + updated_routes = [] + remote_routes = router['routes'] or {} + for remote_route in remote_routes: + if remote_route not in routes_to_delete: + updated_routes.append(remote_route) + + routes = dict() + routes['routes'] = updated_routes + openstack_resource.update(routes) diff --git a/openstack_plugin/resources/network/security_group.py b/openstack_plugin/resources/network/security_group.py new file mode 100644 index 00000000..84ceaa07 --- /dev/null +++ b/openstack_plugin/resources/network/security_group.py @@ -0,0 +1,122 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.networks import OpenstackSecurityGroup +from openstack_sdk.resources.networks import OpenstackSecurityGroupRule +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + SECURITY_GROUP_OPENSTACK_TYPE) +from openstack_plugin.utils import (reset_dict_empty_keys, + validate_resource_quota, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackSecurityGroup) +def create(openstack_resource): + """ + Create openstack security group instance + :param openstack_resource: instance of openstack security group resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackSecurityGroup) +def configure(openstack_resource, security_group_rules=None): + """ + This task will allow to add security group rules and attach them to + created security group if they provided on the node configuration + :param openstack_resource: security group instance + :param security_group_rules: List of security group rules + """ + client_config = ctx.node.properties.get('client_config') + security_group_id = openstack_resource.resource_id + + # Define security group rule instance + security_group_rule = \ + OpenstackSecurityGroupRule(client_config=client_config, + logger=ctx.logger) + + # Check if the "disable_default_egress_rules" is enabled or not so that + # we can remove default egress rules for current security group + if ctx.node.properties.get('disable_default_egress_rules'): + for sg_rule in security_group_rule.list( + query={'security_group_id': security_group_id}): + + security_group_rule.resource_id = sg_rule.id + security_group_rule.delete() + + security_group_rule.resource_id = None + # Check the existing rules attached to current security groups + # in order to apply them to that group + for rule_config in security_group_rules: + # Check if the config contains the security group id or not + if not rule_config.get('security_group_id'): + rule_config['security_group_id'] = security_group_id + + # Create new instance for each security group id + security_group_rule.config = rule_config + # Create security group rule + security_group_rule.create() + + +@with_openstack_resource(OpenstackSecurityGroup) +def delete(openstack_resource): + """ + Delete current openstack security group instance + :param openstack_resource: instance of openstack security group resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackSecurityGroup) +def update(openstack_resource, args): + """ + Update openstack security group by passing args dict that contains + the info that need to be updated + :param openstack_resource: instance of openstack security group resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackSecurityGroup) +def list_security_groups(openstack_resource, query=None): + 
""" + List openstack security groups based on filters applied + :param openstack_resource: Instance of current openstack security group + :param kwargs query: Optional query parameters to be sent to limit + the security groups being returned. + """ + + security_groups = openstack_resource.list(query) + add_resource_list_to_runtime_properties(SECURITY_GROUP_OPENSTACK_TYPE, + security_groups) + + +@with_openstack_resource(OpenstackSecurityGroup) +def creation_validation(openstack_resource): + """ + This method is to check if we can create security group resource + in openstack + :param openstack_resource: Instance of current openstack security group + """ + validate_resource_quota(openstack_resource, SECURITY_GROUP_OPENSTACK_TYPE) + ctx.logger.debug('OK: security group configuration is valid') diff --git a/openstack_plugin/resources/network/security_group_rule.py b/openstack_plugin/resources/network/security_group_rule.py new file mode 100644 index 00000000..8b3659b1 --- /dev/null +++ b/openstack_plugin/resources/network/security_group_rule.py @@ -0,0 +1,74 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.networks import OpenstackSecurityGroupRule +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + SECURITY_GROUP_RULE_OPENSTACK_TYPE) +from openstack_plugin.utils import (validate_resource_quota, + add_resource_list_to_runtime_properties) + + +@with_openstack_resource(OpenstackSecurityGroupRule) +def create(openstack_resource): + """ + Create openstack security group rule instance + :param openstack_resource: instance of openstack security group rule + resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackSecurityGroupRule) +def delete(openstack_resource): + """ + Delete current openstack security group rule instance + :param openstack_resource: instance of openstack security group rule + resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackSecurityGroupRule) +def list_security_group_rules(openstack_resource, query=None): + """ + List openstack security group rules based on filters applied + :param openstack_resource: Instance of current openstack security group + rule + :param kwargs query: Optional query parameters to be sent to limit + the security group rules being returned. 
+ """ + + security_group_rules = openstack_resource.list(query) + add_resource_list_to_runtime_properties(SECURITY_GROUP_RULE_OPENSTACK_TYPE, + security_group_rules) + + +@with_openstack_resource(OpenstackSecurityGroupRule) +def creation_validation(openstack_resource): + """ + This method is to check if we can create security group rule resource + in openstack + :param openstack_resource: Instance of current openstack security rule + group + """ + validate_resource_quota(openstack_resource, + SECURITY_GROUP_RULE_OPENSTACK_TYPE) + ctx.logger.debug('OK: security group rule configuration is valid') diff --git a/openstack_plugin/resources/network/subnet.py b/openstack_plugin/resources/network/subnet.py new file mode 100644 index 00000000..0e51f840 --- /dev/null +++ b/openstack_plugin/resources/network/subnet.py @@ -0,0 +1,143 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.networks import OpenstackSubnet +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + SUBNET_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE) +from openstack_plugin.utils import ( + reset_dict_empty_keys, + validate_resource_quota, + add_resource_list_to_runtime_properties, + find_openstack_ids_of_connected_nodes_by_openstack_type) + + +def _get_subnet_network_id_from_relationship(): + """ + This method will lookup the network id for subnet using relationship + and will raise error if it returns multiple network + :return str network_id: Network id + """ + # Get the network id from relationship if it is existed + network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, NETWORK_OPENSTACK_TYPE) + # Check if subnet is connected to multiple networks + if len(network_ids) > 1: + raise NonRecoverableError('Cannot attach subnet to multiple ' + 'networks {0}'.format(','.join(network_ids))) + + return network_ids[0] if network_ids else None + + +def _update_subnet_config(subnet_config): + """ + This method will try to update subnet config with network configurations + using the relationships connected with subnet node + :param dict subnet_config: The subnet configuration required in order to + create the subnet instance using Openstack API + """ + + # Check to see if the network id is provided on the subnet config + # properties + network_id = subnet_config.get('network_id') + + # Get the network id from relationship if it is existed + rel_network_id = _get_subnet_network_id_from_relationship() + if network_id and rel_network_id: + raise NonRecoverableError('Subnet can\'t both have the ' + '"network_id" property and be ' + 'connected to a network via a ' + 'relationship at the same time') + + subnet_config['network_id'] = network_id or rel_network_id + + +def _handle_external_subnet_resource(openstack_resource): + """ + This 
method is to do a validation for external subnet resource when it + is connected to network node resource + :param openstack_resource: Instance of openstack subnet resource + """ + network_id = _get_subnet_network_id_from_relationship() + remote_subnet = openstack_resource.get() + if network_id and network_id != remote_subnet.network_id: + raise NonRecoverableError( + 'Expected external resources subnet {0} and network' + ' {1} to be connected'.format(remote_subnet.id, network_id)) + + +@with_openstack_resource( + OpenstackSubnet, + existing_resource_handler=_handle_external_subnet_resource) +def create(openstack_resource): + """ + Create openstack subnet instance + :param openstack_resource: instance of openstack subnet resource + """ + # Update subnet config before send create API request + _update_subnet_config(openstack_resource.config) + # Create subnet resource + created_resource = openstack_resource.create() + # Save resource id as runtime property + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackSubnet) +def delete(openstack_resource): + """ + Delete current openstack subnet + :param openstack_resource: instance of openstack subnet resource + """ + openstack_resource.delete() + + +@with_openstack_resource(OpenstackSubnet) +def update(openstack_resource, args): + """ + Update openstack subnet by passing args dict that contains the info that + need to be updated + :param openstack_resource: instance of openstack subnet resource + :param args: dict of information need to be updated + """ + args = reset_dict_empty_keys(args) + openstack_resource.update(args) + + +@with_openstack_resource(OpenstackSubnet) +def list_subnets(openstack_resource, query=None): + """ + List openstack subnets based on filters applied + :param openstack_resource: Instance of current openstack network + :param kwargs query: Optional query parameters to be sent to limit + the networks being returned. + """ + subnets = openstack_resource.list(query) + add_resource_list_to_runtime_properties(SUBNET_OPENSTACK_TYPE, subnets) + + +@with_openstack_resource(OpenstackSubnet) +def creation_validation(openstack_resource): + """ + This method is to check if we can create subnet resource in openstack + :param openstack_resource: Instance of current openstack subnet + """ + validate_resource_quota(openstack_resource, SUBNET_OPENSTACK_TYPE) + ctx.logger.debug('OK: port configuration is valid') diff --git a/openstack_plugin/resources/volume/__init__.py b/openstack_plugin/resources/volume/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/resources/volume/volume.py b/openstack_plugin/resources/volume/volume.py new file mode 100644 index 00000000..48565ca4 --- /dev/null +++ b/openstack_plugin/resources/volume/volume.py @@ -0,0 +1,525 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
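+
+# Note (summary of the code in this module): the long-running volume
+# operations below (backup, snapshot and delete) are written to be
+# re-entrant. On the first invocation they store a task flag plus the
+# created resource id in ctx.instance.runtime_properties and then raise
+# OperationRetry until the resource reaches the expected status, for
+# example (sketch only):
+#
+#     if VOLUME_BACKUP_TASK not in ctx.instance.runtime_properties:
+#         backup_id = backup.create().id
+#         ctx.instance.runtime_properties[VOLUME_BACKUP_TASK] = True
+#         ctx.instance.runtime_properties[VOLUME_BACKUP_ID] = backup_id
+#     ...
+#     raise OperationRetry('Volume backup is still in ... status')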
+ +# Standard imports +import time + +# Third party imports +from cloudify import ctx +from cloudify.exceptions import (OperationRetry, NonRecoverableError) +import openstack.exceptions + +# Local imports +from openstack_sdk.resources.volume import (OpenstackVolume, + OpenstackVolumeBackup, + OpenstackVolumeSnapshot) +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_AZ_PROPERTY, + VOLUME_OPENSTACK_TYPE, + IMAGE_OPENSTACK_TYPE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES, + VOLUME_TASK_DELETE, + VOLUME_BACKUP_TASK, + VOLUME_SNAPSHOT_TASK, + VOLUME_BACKUP_ID, + VOLUME_SNAPSHOT_ID, + VOLUME_BOOTABLE, + VOLUME_BACKUP_OPENSTACK_TYPE, + VOLUME_SNAPSHOT_OPENSTACK_TYPE) +from openstack_plugin.utils import\ + (validate_resource_quota, + merge_resource_config, + get_ready_resource_status, + wait_until_status, + get_snapshot_name, + add_resource_list_to_runtime_properties, + find_openstack_ids_of_connected_nodes_by_openstack_type) + + +def _is_volume_backup_matched(backup_instance, volume_id, name): + """ + This method is to do try to match the remote backup | snapshot volume based + on volume id and backup name so that we can decide the object that should + be removed + :param backup_instance: Backup + :param str volume_id: The volume id + :param str name: The name of the backup | snapshot + :return bool: Boolean flag if there is a matched backup | snapshot found + """ + # Try to do a match for the snapshot | backup for name + # in case name is provided and this is mainly + # will be available when run snapshot | backup delete + match_1 = True if name and backup_instance.name == name else False + + # If the name is missing then we can depend on volume just like when we + # remove snapshots for related volume and do not have information + # about snapshot | backup name so we depend on volume id + match_2 = volume_id == backup_instance.volume_id + + return match_1 or match_2 + + +def _populate_volume_with_image_id_from_relationship(volume_config): + """ + This method will try to populate image id for volume if there is a + relationship between volume & image + :param volume_config: volume config required in order to create volume + in openstack + """ + + if volume_config: + image_ids = find_openstack_ids_of_connected_nodes_by_openstack_type( + ctx, IMAGE_OPENSTACK_TYPE) + + if image_ids: + volume_config.update({'imageRef': image_ids[0]}) + + +def _set_volume_runtime_properties(volume): + """ + Set volume configuration as runtime properties so that it can be used + when attach volume as bootable to server, so this configuration will be + required when create a relationship between server and volume + :param volume: Volume instance of openstack.volume.v2.volume.Volume + """ + if volume: + # Check if the availability_zone is set and part of the volume object + if volume.availability_zone: + ctx.instance.runtime_properties[OPENSTACK_AZ_PROPERTY] = \ + volume.availability_zone + + # Check if the volume is bootable so that we can set that as part og + is_bootable = True if volume.is_bootable else False + ctx.instance.runtime_properties[VOLUME_BOOTABLE] = is_bootable + + +def _prepare_volume_backup_instance(volume_resource, backup_config=None): + """ + Prepare volume backup openstack instance so that we can use it to do + backup volume + :param volume_resource: instance of openstack volume resource + :param dict backup_config: Snapshot config data + :return: Return instance of openstack volume backup + """ + + # Prepare 
client config in order to apply backup + client_config = volume_resource.client_config + # Instance of backup volume + backup = OpenstackVolumeBackup(client_config=client_config, + logger=ctx.logger) + if backup_config: + backup.config = backup_config + return backup + + +def _prepare_volume_snapshot_instance(volume_resource, snapshot_config=None): + """ + Prepare volume snapshot openstack instance so that we can use it to do + snapshot volume + :param volume_resource: instance of openstack volume resource + :param dict snapshot_config: Snapshot config data + :return: Return instance of openstack volume snapshot + """ + + # Prepare client config in order to apply snapshot + client_config = volume_resource.client_config + # Instance of snapshot volume + snapshot = OpenstackVolumeSnapshot(client_config=client_config, + logger=ctx.logger) + if snapshot_config: + snapshot.config = snapshot_config + + return snapshot + + +def _create_volume_backup(volume_resource, backup_name): + """ + This method will handle creating volume backup and make sure it is + created successfully + :param volume_resource: instance of openstack volume resource + :param str backup_name: The backup name + """ + # Prepare config for backup + # Prepare config for backup + backup_config = { + 'name': backup_name, + 'volume_id': volume_resource.resource_id + } + + backup = _prepare_volume_backup_instance(volume_resource, backup_config) + + # Check if the backup id exists or not, if it exists that means the + # backup volume created but still checking its status to make sure it + # is ready to use + if VOLUME_BACKUP_ID in ctx.instance.runtime_properties: + backup.resource_id = \ + ctx.instance.runtime_properties[VOLUME_BACKUP_ID] + + # Check if the backup call is called before or not, so that we can only + # trigger it only once + if VOLUME_BACKUP_TASK not in ctx.instance.runtime_properties: + # Create backup + backup_response = backup.create() + backup_id = backup_response.id + backup.resource_id = backup_id + ctx.instance.runtime_properties[VOLUME_BACKUP_TASK] = True + ctx.instance.runtime_properties[VOLUME_BACKUP_ID] = backup_id + + backup_resource, ready = \ + get_ready_resource_status(backup, + VOLUME_BACKUP_OPENSTACK_TYPE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES) + + if not ready: + raise OperationRetry('Volume backup is still in {0} status'.format( + backup_resource.status)) + else: + del ctx.instance.runtime_properties[VOLUME_BACKUP_TASK] + del ctx.instance.runtime_properties[VOLUME_BACKUP_ID] + + +def _create_volume_snapshot(volume_resource, snapshot_name, snapshot_type): + """ + This method will handle creating volume snapshot and make sure it is + created successfully + :param volume_resource: instance of openstack volume resource + :param str snapshot_name: The name of the snapshot + :param str snapshot_type: The type of the snapshot + """ + + # Prepare config for snapshot + snapshot_config = { + 'name': snapshot_name, + 'volume_id': volume_resource.resource_id, + 'force': True, + 'description': snapshot_type + } + + # Get an instance of snapshot volume ready to create the desired + # snapshot volume + snapshot = \ + _prepare_volume_snapshot_instance(volume_resource, snapshot_config) + + # Check if the snapshot id exists or not, if it exists that mean the + # snapshot volume created but still checking its status to make sure it + # is ready to use + if VOLUME_SNAPSHOT_ID in ctx.instance.runtime_properties: + snapshot.resource_id = \ + ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID] + + # Check if 
the snapshot volume task exists or not, if it does not exist + # that means, this is the first time we are running this operation task, + # otherwise it still checking the status to make sure it is finished + if VOLUME_SNAPSHOT_TASK not in ctx.instance.runtime_properties: + # Create snapshot + snapshot_response = snapshot.create() + snapshot_id = snapshot_response.id + snapshot.resource_id = snapshot_id + ctx.instance.runtime_properties[VOLUME_SNAPSHOT_TASK] = True + ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID] = snapshot_id + + # Check the status of the snapshot process + snapshot_resource, ready = \ + get_ready_resource_status(snapshot, + VOLUME_SNAPSHOT_OPENSTACK_TYPE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES) + + if not ready: + raise OperationRetry('Volume snapshot is still in {0} status'.format( + snapshot_resource.status)) + else: + # Once the snapshot is ready to user, we should clear volume + # snapshot task & snapshot volume id from runtime properties in order + # to allow trigger the operation multiple times + del ctx.instance.runtime_properties[VOLUME_SNAPSHOT_TASK] + del ctx.instance.runtime_properties[VOLUME_SNAPSHOT_ID] + + +def _clean_volume_backups(backup_instance, backup_type, search_opts): + """ + This method will clean all backups | snapshots volume based on provided + backup type and on filter criteria + :param backup_instance: This is an instance of volume backup or + volume snapshot (OpenstackVolumeBackup | OpenstackVolumeSnapshot) + required in order to clean all volume backups/snapshots + :param str backup_type: The type of volume backup (Full backup or snapshot) + :param dict search_opts: Search criteria used in order to lookup the + backups + """ + if all([search_opts, backup_instance]): + name = search_opts.get('name') + volume_id = search_opts.get('volume_id') + # Since list backups volume does not support query filters for volume + # id & name, then we need to take that into consideration since + # passing volume_id & name to the backups api will raise + # InvalidResourceQuery error + search_query = {} + if backup_type == VOLUME_SNAPSHOT_OPENSTACK_TYPE: + search_query = search_opts + + # Right now list volume backup does not support to list backups + # using backup name and volume id, so that we need to list all + # volumes backups and then just do a compare to match the one we + # need to delete + for backup in backup_instance.list(query=search_query): + if _is_volume_backup_matched(backup, volume_id, name): + ctx.logger.debug( + 'Check {0} before delete: {1}:{2}' + ' with state {3}'.format(backup_type, backup.id, + backup.name, backup.status)) + backup_instance.resource_id = backup.id + backup_instance.delete() + + # wait 10 seconds before next check + time.sleep(10) + + for backup in backup_instance.list(query=search_query): + ctx.logger.debug('Check {0} after delete: {1}:{2} with state {3}' + .format(backup_type, backup.id, + backup.name, backup.status)) + if _is_volume_backup_matched(backup, volume_id, name): + return ctx.operation.retry( + message='{0} is still alive'.format(backup.name), + retry_after=30) + else: + raise NonRecoverableError('volume_id, name, backup_instance ' + 'variables cannot all set to None') + + +def _delete_volume_backup(volume_resource, search_opts): + """ + This method will delete volume backup + :param volume_resource: instance of openstack volume resource + :param dict search_opts: Search criteria used in order to lookup the + backups + """ + backup_volume = _prepare_volume_backup_instance(volume_resource) + 
_clean_volume_backups(backup_volume, + VOLUME_BACKUP_OPENSTACK_TYPE, + search_opts) + + +def _delete_volume_snapshot(volume_resource, search_opts): + """ + This method will delete volume snapshot + :param volume_resource: instance of openstack volume resource + :param dict search_opts: Search criteria used in order to lookup the + snapshots + """ + snapshot_volume = _prepare_volume_snapshot_instance(volume_resource) + _clean_volume_backups(snapshot_volume, + VOLUME_SNAPSHOT_OPENSTACK_TYPE, + search_opts) + + +def _restore_volume_from_backup(volume_resource, backup_name): + """ + This method will use to restore volume backups + :param volume_resource: instance of openstack volume resource + :param str backup_name: The name of the backup + """ + + volume_id = volume_resource.resource_id + backup_volume = _prepare_volume_backup_instance(volume_resource) + # Since backup volume does not allow to filter backup volume, we are + # iterating over all volume backup in order to match the backup name & + # volume id so that we can restore it + for backup in backup_volume.list(): + # if returned more than one backup, use first + if backup.name == backup_name: + ctx.logger.debug( + 'Used first with {0} to {1}'.format(backup.id, volume_id)) + name = 'volume-restore-{0}'.format(backup.id) + backup_volume.restore(backup.id, volume_id, name) + break + else: + raise NonRecoverableError('No such {0} backup.'.format(backup_name)) + + +@with_openstack_resource(OpenstackVolume) +def create(openstack_resource, args={}): + """ + Create openstack volume instance + :param openstack_resource: instance of openstack volume resource + :param args User configuration that could merge/override with + resource configuration + """ + # Check to see if there are some configuration provided via operation + # input so that we can merge them with volume config + merge_resource_config(openstack_resource.config, args) + + # Before create volume we need to check if the current volume node has + # no relationship with image node type, because if there is a + # relationship with image then we need to get the image id from that + # relationship + _populate_volume_with_image_id_from_relationship(openstack_resource.config) + + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackVolume) +def start(openstack_resource, **kwargs): + """ + This opeeration task will try to check if the volume created is ready + to use and available + :param openstack_resource: current openstack volume instance + :param kwargs: Extra information provided by operation input + """ + volume = wait_until_status(openstack_resource, + VOLUME_OPENSTACK_TYPE, + VOLUME_STATUS_AVAILABLE, + VOLUME_ERROR_STATUSES) + + # Set volume runtime properties needed when attach bootable volume to + # server + _set_volume_runtime_properties(volume) + + +@with_openstack_resource(OpenstackVolume) +def snapshot_create(openstack_resource, **kwargs): + """ + Create volume backup. 
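+
+    Example kwargs (illustrative values) as passed by the snapshot
+    workflow; snapshot_incremental=False triggers a volume backup, while
+    True triggers a volume snapshot:
+
+        {
+            'snapshot_name': 'backup-1',
+            'snapshot_type': 'manual',
+            'snapshot_incremental': False
+        }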
+ :param openstack_resource: instance of openstack volume resource + :param kwargs: snapshot information provided by workflow + """ + + ctx.logger.info('Create snapshot for {0}'.format( + openstack_resource.resource_id)) + + # Get snapshot information provided by workflow parameters + snapshot_name = kwargs.get('snapshot_name') + snapshot_type = kwargs.get('snapshot_type') + snapshot_incremental = kwargs.get('snapshot_incremental') + + # Generate snapshot name + backup_name = \ + get_snapshot_name('volume', snapshot_name, snapshot_incremental) + + if not snapshot_incremental: + # Create volume backup + _create_volume_backup(openstack_resource, backup_name) + else: + # Create volume snapshot + _create_volume_snapshot(openstack_resource, backup_name, snapshot_type) + + +@with_openstack_resource(OpenstackVolume) +def snapshot_apply(openstack_resource, **kwargs): + """ + This operation task will restore volume from created volume backups + :param openstack_resource: instance of openstack volume resource + :param kwargs: snapshot information provided by workflow + """ + # Get snapshot information provided by workflow parameters + snapshot_name = kwargs.get('snapshot_name') + snapshot_incremental = kwargs.get('snapshot_incremental') + + # Generate snapshot name + backup_name = \ + get_snapshot_name('volume', snapshot_name, snapshot_incremental) + + if not snapshot_incremental: + _restore_volume_from_backup(openstack_resource, backup_name) + else: + raise NonRecoverableError('Apply snapshot is not supported') + + +@with_openstack_resource(OpenstackVolume) +def snapshot_delete(openstack_resource, **kwargs): + """ + Delete volume backup. + :param openstack_resource: instance of openstack volume resource + :param kwargs: snapshot information provided by workflow + """ + + ctx.logger.info('Delete snapshot for {0}'.format( + openstack_resource.resource_id)) + + # Get snapshot information provided by workflow parameters + snapshot_name = kwargs.get('snapshot_name') + snapshot_incremental = kwargs.get('snapshot_incremental') + # Generate snapshot name + snapshot_name = \ + get_snapshot_name('volume', snapshot_name, snapshot_incremental) + + search_opts = \ + { + 'volume_id': openstack_resource.resource_id, + 'name': snapshot_name + } + + # This is a backup stored at object storage must be deleted + if not snapshot_incremental: + _delete_volume_backup(openstack_resource, search_opts) + # This is a snapshot that need to be deleted + else: + _delete_volume_snapshot(openstack_resource, search_opts) + + +@with_openstack_resource(OpenstackVolume) +def delete(openstack_resource): + """ + Delete current openstack volume instance + :param openstack_resource: instance of openstack volume resource + """ + + # Before delete the volume we should check if volume has associated + # snapshots that must be cleaned + search_opts = {'volume_id': openstack_resource.resource_id, } + _delete_volume_snapshot(openstack_resource, search_opts) + + # Only trigger delete api when it is the first time we run this task, + # otherwise we should check the if the volume is really deleted or not + # by keep calling get volume api + if VOLUME_TASK_DELETE not in ctx.instance.runtime_properties: + # Delete volume resource + openstack_resource.delete() + ctx.instance.runtime_properties[VOLUME_TASK_DELETE] = True + + # Make sure that volume are deleting + try: + openstack_resource.get() + raise OperationRetry('Volume {0} is still deleting'.format( + openstack_resource.resource_id)) + except openstack.exceptions.ResourceNotFound: + 
ctx.logger.info('Volume {0} is deleted successfully'.format( + openstack_resource.resource_id)) + + +@with_openstack_resource(OpenstackVolume) +def list_volumes(openstack_resource, query=None): + """ + List openstack volumes based on filters applied + :param openstack_resource: Instance of current openstack volume + :param kwargs query: Optional query parameters to be sent to limit + the volumes being returned. + """ + volumes = openstack_resource.list(query) + add_resource_list_to_runtime_properties(VOLUME_OPENSTACK_TYPE, volumes) + + +@with_openstack_resource(OpenstackVolume) +def creation_validation(openstack_resource): + """ + This method is to check if we can create volume resource in openstack + :param openstack_resource: Instance of current openstack volume + """ + validate_resource_quota(openstack_resource, VOLUME_OPENSTACK_TYPE) + ctx.logger.debug('OK: volume configuration is valid') diff --git a/openstack_plugin/resources/volume/volume_type.py b/openstack_plugin/resources/volume/volume_type.py new file mode 100644 index 00000000..55dfd903 --- /dev/null +++ b/openstack_plugin/resources/volume/volume_type.py @@ -0,0 +1,41 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +from cloudify import ctx + +# Local imports +from openstack_sdk.resources.volume import OpenstackVolumeType +from openstack_plugin.decorators import with_openstack_resource +from openstack_plugin.constants import RESOURCE_ID + + +@with_openstack_resource(OpenstackVolumeType) +def create(openstack_resource): + """ + Create openstack volume type instance + :param openstack_resource: instance of openstack volume type resource + """ + created_resource = openstack_resource.create() + ctx.instance.runtime_properties[RESOURCE_ID] = created_resource.id + + +@with_openstack_resource(OpenstackVolumeType) +def delete(openstack_resource): + """ + Delete current openstack volume type + :param openstack_resource: instance of openstack volume type resource + """ + openstack_resource.delete() diff --git a/openstack_plugin/tests/__init__.py b/openstack_plugin/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/tests/base.py b/openstack_plugin/tests/base.py new file mode 100644 index 00000000..9017c442 --- /dev/null +++ b/openstack_plugin/tests/base.py @@ -0,0 +1,220 @@ +# ####### +# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. 
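+
+# Note (summary of this module): OpenStackTestBase wires a
+# MockCloudifyContext into cloudify.state.current_ctx for every test, so
+# plugin tasks can be called directly against the mocked context. A
+# typical test (illustrative, mirroring the flavor tests in this change)
+# looks like:
+#
+#     self._prepare_context_for_operation(
+#         test_name='FlavorTestCase',
+#         ctx_operation_name='cloudify.interfaces.lifecycle.create')
+#     flavor.create()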
+ +# Standard imports +import copy +import uuid +import unittest + +# Third party imports +from cloudify.manager import DirtyTrackingDict +from cloudify.state import current_ctx +from cloudify.mocks import ( + MockCloudifyContext, + MockNodeContext, + MockNodeInstanceContext, + MockRelationshipContext, + MockRelationshipSubjectContext, +) + + +class CustomMockNodeContext(MockNodeContext): + def __init__(self, + id=None, + properties=None, + type=None, + type_hierarchy=['cloudify.nodes.Root']): + super(CustomMockNodeContext, self).__init__(id=id, + properties=properties) + self._type = type + self._type_hierarchy = type_hierarchy + + @property + def type(self): + return self._type + + @property + def type_hierarchy(self): + return self._type_hierarchy + + +class OpenStackTestBase(unittest.TestCase): + + def setUp(self): + super(OpenStackTestBase, self).setUp() + + def tearDown(self): + current_ctx.clear() + super(OpenStackTestBase, self).tearDown() + + def _to_DirtyTrackingDict(self, origin): + if not origin: + origin = {} + dirty_dict = DirtyTrackingDict() + for k in origin: + dirty_dict[k] = copy.deepcopy(origin[k]) + return dirty_dict + + @property + def client_config(self): + return { + 'auth_url': 'foo', + 'username': 'foo', + 'password': 'foo', + 'region_name': 'foo', + 'project_name': 'foo' + } + + @property + def resource_config(self): + return { + 'name': 'foo', + 'description': 'foo' + } + + @property + def node_properties(self): + return { + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + + @property + def runtime_properties(self): + return {} + + def get_mock_ctx(self, + test_name, + test_properties={}, + test_runtime_properties={}, + test_relationships=None, + type_hierarchy=['cloudify.nodes.Root'], + test_source=None, + test_target=None, + ctx_operation_name=None): + + operation_ctx = { + 'retry_number': 0, 'name': 'cloudify.interfaces.lifecycle.' 
+ } if not ctx_operation_name else { + 'retry_number': 0, 'name': ctx_operation_name + } + + ctx = MockCloudifyContext( + node_id=test_name, + node_name=test_name, + deployment_id=test_name, + properties=copy.deepcopy(test_properties or self.node_properties), + runtime_properties=self._to_DirtyTrackingDict( + test_runtime_properties or self.runtime_properties + ), + source=test_source, + target=test_target, + relationships=test_relationships, + operation=operation_ctx + ) + + ctx.node.type_hierarchy = type_hierarchy + + return ctx + + def _prepare_context_for_operation(self, + test_name, + test_properties={}, + test_runtime_properties={}, + test_relationships=None, + type_hierarchy=['cloudify.nodes.Root'], + test_source=None, + test_target=None, + ctx_operation_name=None): + self._ctx = self.get_mock_ctx( + test_name=test_name, + test_properties=test_properties, + test_runtime_properties=test_runtime_properties, + test_relationships=test_relationships, + type_hierarchy=type_hierarchy, + test_source=test_source, + test_target=test_target, + ctx_operation_name=ctx_operation_name) + current_ctx.set(self._ctx) + + def get_mock_relationship_ctx(self, + deployment_name=None, + node_id=None, + test_properties={}, + test_runtime_properties={}, + test_source=None, + test_target=None): + + ctx = MockCloudifyContext( + node_id=node_id, + deployment_id=deployment_name, + properties=copy.deepcopy(test_properties), + source=test_source, + target=test_target, + runtime_properties=copy.deepcopy(test_runtime_properties)) + return ctx + + def get_mock_relationship_ctx_for_node(self, rel_specs): + """ + This method will generate list of mock relationship associated with + certain node + :param rel_specs: Relationships is an ordered collection of + relationship specs - dicts with the keys "node" and "instance" used + to construct the MockNodeContext and the MockNodeInstanceContext, + and optionally a "type" key. + Examples: [ + {}, + {"node": {"id": 5}}, + { + "type": "some_type", + "instance": { + "id": 3, + "runtime_properties":{} + } + } + ] + :return list: Return list of "MockRelationshipContext" instances + """ + + relationships = [] + for rel_spec in rel_specs: + node = rel_spec.get('node', {}) + node_id = node.pop('id', uuid.uuid4().hex) + + instance = rel_spec.get('instance', {}) + instance_id = instance.pop('id', '{0}_{1}'.format( + node_id, uuid.uuid4().hex)) + if 'properties' not in node: + node['properties'] = {} + + mock_data = { + 'id': node_id, + 'properties': node['properties'], + } + + if rel_spec.get('type_hierarchy'): + mock_data['type_hierarchy'] = rel_spec['type_hierarchy'] + + node_ctx = CustomMockNodeContext(**mock_data) + instance_ctx = MockNodeInstanceContext(id=instance_id, **instance) + + rel_subject_ctx = MockRelationshipSubjectContext( + node=node_ctx, instance=instance_ctx) + rel_type = rel_spec.get('type') + rel_ctx = MockRelationshipContext(target=rel_subject_ctx, + type=rel_type) + relationships.append(rel_ctx) + + return relationships diff --git a/openstack_plugin/tests/compute/__init__.py b/openstack_plugin/tests/compute/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/tests/compute/test_flavor.py b/openstack_plugin/tests/compute/test_flavor.py new file mode 100644 index 00000000..505d8b1d --- /dev/null +++ b/openstack_plugin/tests/compute/test_flavor.py @@ -0,0 +1,167 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. 
All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +import mock +import openstack.compute.v2.flavor +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.compute import flavor +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + FLAVOR_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class KeyPairTestCase(OpenStackTestBase): + + def setUp(self): + super(KeyPairTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_flavor', + 'description': 'flavor_description' + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='FlavorTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + flavor_instance = openstack.compute.v2.flavor.Flavor(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor', + 'links': '2', + 'description': 'Testing flavor', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + + }) + # Mock flavor response + mock_connection().compute.create_flavor = \ + mock.MagicMock(return_value=flavor_instance) + + # Call create flavor + flavor.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_flavor') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + FLAVOR_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='FlavorTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + flavor_instance = openstack.compute.v2.flavor.Flavor(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor', + 'links': '2', + 'description': 'Testing flavor', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + + }) + # Mock delete flavor response + mock_connection().compute.delete_flavor = \ + mock.MagicMock(return_value=flavor_instance) + + # Mock get flavor response + mock_connection().compute.get_flavor = \ + mock.MagicMock(return_value=flavor_instance) + + # Call delete flavor + flavor.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, + self._ctx.instance.runtime_properties) + + def test_update(self, _): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='FlavorTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.update') + + updated_config = { + 'name': 'Updated Name' + } + + with self.assertRaises(NonRecoverableError): + # Call update flavor + flavor.update(args=updated_config) + + def test_list_flavors(self, mock_connection): + 
# Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='FlavorTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + flavors = [ + openstack.compute.v2.flavor.FlavorDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor_1', + 'links': '2', + 'description': 'Testing flavor 1', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + }), + openstack.compute.v2.flavor.FlavorDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_flavor_2', + 'links': '3', + 'description': 'Testing flavor 2', + 'os-flavor-access:is_public': True, + 'ram': 4, + 'vcpus': 3, + 'swap': 3 + }) + ] + # Mock list flavors response + mock_connection().compute.flavors = \ + mock.MagicMock(return_value=flavors) + + flavor.list_flavors() + + # Check if the flavor list saved as runtime properties + self.assertIn( + 'flavor_list', + self._ctx.instance.runtime_properties) + + # Check the size of flavor list + self.assertEqual( + len(self._ctx.instance.runtime_properties['flavor_list']), 2) diff --git a/openstack_plugin/tests/compute/test_host_aggregate.py b/openstack_plugin/tests/compute/test_host_aggregate.py new file mode 100644 index 00000000..93e6dd06 --- /dev/null +++ b/openstack_plugin/tests/compute/test_host_aggregate.py @@ -0,0 +1,272 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
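Both the flavor tests above and the host-aggregate tests below use the same fixture: the test class is decorated with `@mock.patch('openstack.connect')`, and each test programs methods on `mock_connection()` before invoking the plugin operation. The pattern works because calling a `MagicMock` always returns the same child mock, so the connection object built from the patched `openstack.connect` inside the code under test is the very object the test configured. A minimal standalone sketch of that pattern, assuming nothing about the plugin internals (`create_flavor_via_sdk` and `fake_flavor` are illustrative names, not plugin code):

```python
# Minimal sketch of the mock_connection() pattern (illustrative names only).
import mock  # the tests use the `mock` backport; `from unittest import mock` also works
import openstack


def create_flavor_via_sdk(client_config, payload):
    # Stand-in for the code under test: open a connection, create a flavor.
    conn = openstack.connect(**client_config)
    return conn.compute.create_flavor(**payload)


@mock.patch('openstack.connect')
def demo(mock_connect):
    fake_flavor = mock.Mock(id='a95b5509-c122-4c2f-823e-884bb559afe8')
    # Calling a MagicMock always yields the same child mock, so configuring
    # mock_connect().compute here also configures the connection object that
    # create_flavor_via_sdk builds internally.
    mock_connect().compute.create_flavor = mock.MagicMock(
        return_value=fake_flavor)

    result = create_flavor_via_sdk({'auth_url': 'foo'}, {'name': 'test_flavor'})
    assert result.id == 'a95b5509-c122-4c2f-823e-884bb559afe8'


if __name__ == '__main__':
    demo()
```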
+ +# Third party imports +import mock +import openstack.compute.v2.aggregate +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.compute import host_aggregate +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + HOST_AGGREGATE_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class HostAggregateTestCase(OpenStackTestBase): + + def setUp(self): + super(HostAggregateTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_host_aggregate', + 'description': 'host_aggregate_description' + } + + @property + def node_properties(self): + properties = super(HostAggregateTestCase, self).node_properties + properties['metadata'] = {'key-1': 'test-1', 'key-2': 'test-2'} + return properties + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + }) + # Mock aggregate response + mock_connection().compute.create_aggregate = \ + mock.MagicMock(return_value=aggregate_instance) + + # Call create aggregate + host_aggregate.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_host_aggregate') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + HOST_AGGREGATE_OPENSTACK_TYPE) + + def test_configure(self, mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure') + + old_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + new_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': '1', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + 'metadata': { + 'key-1': 'test-1', + 'key-2': 'test-2' + } + }) + + # Mock get aggregate response + mock_connection().compute.get_aggregate = \ + mock.MagicMock(return_value=old_aggregate_instance) + + # Mock aggregate response + mock_connection().compute.set_aggregate_metadata = \ + mock.MagicMock(return_value=new_aggregate_instance) + + # Call configure aggregate + host_aggregate.set_metadata() + + def test_update(self, _): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='FlavorTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.update') + + updated_config = { + 'name': 'Updated Name' + } + + with self.assertRaises(NonRecoverableError): + # Call update aggregate + host_aggregate.update(args=updated_config) + + def test_delete(self, mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 
'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + # Mock get aggregate response + mock_connection().compute.get_aggregate = \ + mock.MagicMock(return_value=aggregate_instance) + + # Mock aggregate response + mock_connection().compute.delete_aggregate = \ + mock.MagicMock(return_value=None) + + # Call delete aggregate + host_aggregate.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, + self._ctx.instance.runtime_properties) + + def test_list_aggregates(self, mock_connection): + # Prepare the context for list aggregates operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + aggregate_list = [ + openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate_1', + 'availability_zone': 'test_availability_zone_1', + }), + openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_aggregate_2', + 'availability_zone': 'test_availability_zone_2', + }), + ] + + # Mock list aggregate response + mock_connection().compute.aggregates = \ + mock.MagicMock(return_value=aggregate_list) + + # Call list aggregates + host_aggregate.list_aggregates() + + # Check if the aggregates list saved as runtime properties + self.assertIn( + 'aggregate_list', + self._ctx.instance.runtime_properties) + + # Check the size of aggregate list + self.assertEqual( + len(self._ctx.instance.runtime_properties['aggregate_list']), 2) + + def test_add_hosts(self, mock_connection): + # Prepare the context for add hosts operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.operations.add_hosts') + + hosts_to_add = ['host-1', 'host-2'] + old_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + new_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + 'hosts': hosts_to_add + }) + + # Mock get aggregate response + mock_connection().compute.get_aggregate = \ + mock.MagicMock(return_value=old_aggregate_instance) + + # Mock add host aggregate response + mock_connection().compute.add_host_to_aggregate = \ + mock.MagicMock(return_value=new_aggregate_instance) + + # Call add hosts to aggregate + host_aggregate.add_hosts(hosts=hosts_to_add) + + def test_add_invalid_hosts(self, _): + # Prepare the context for add hosts operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.operations.add_hosts') + + invalid_hosts_to_add = 'invalid data' + with self.assertRaises(NonRecoverableError): + # Call add hosts to aggregate + host_aggregate.add_hosts(hosts=invalid_hosts_to_add) + + def test_remove_hosts(self, mock_connection): + # Prepare the context for remove hosts operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.operations.remove_hosts') + + hosts_to_remove = ['host-1'] + old_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 
'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + 'hosts': ['host-1', 'host-2'] + }) + + new_aggregate_instance = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_host_aggregate', + 'availability_zone': 'test_availability_zone', + 'hosts': ['host-2'] + }) + + # Mock get aggregate response + mock_connection().compute.get_aggregate = \ + mock.MagicMock(return_value=old_aggregate_instance) + + # Mock remove host aggregate response + mock_connection().compute.remove_host_from_aggregate = \ + mock.MagicMock(return_value=new_aggregate_instance) + + # Call remove hosts from aggregate + host_aggregate.remove_hosts(hosts=hosts_to_remove) + + def test_remove_invalid_hosts(self, _): + # Prepare the context for remove hosts operation + self._prepare_context_for_operation( + test_name='HostAggregateTestCase', + ctx_operation_name='cloudify.interfaces.operations.remove_hosts') + + invalid_hosts_to_remove = 'invalid data' + with self.assertRaises(NonRecoverableError): + # Call add hosts to aggregate + host_aggregate.remove_hosts(hosts=invalid_hosts_to_remove) diff --git a/openstack_plugin/tests/compute/test_keypair.py b/openstack_plugin/tests/compute/test_keypair.py new file mode 100644 index 00000000..ac535540 --- /dev/null +++ b/openstack_plugin/tests/compute/test_keypair.py @@ -0,0 +1,174 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
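The `test_add_invalid_hosts` and `test_remove_invalid_hosts` cases above only assert that passing a plain string where a list of hosts is expected raises `NonRecoverableError`; they say nothing about how the plugin performs that check. A hypothetical guard that would satisfy those assertions (a sketch only, not the plugin's actual implementation):

```python
# Hypothetical validator mirroring what the invalid-hosts tests assert.
from cloudify.exceptions import NonRecoverableError


def validate_hosts(hosts):
    """Reject anything that is not a list of host names."""
    if not isinstance(hosts, list):
        raise NonRecoverableError(
            'hosts must be a list, got {0!r}'.format(hosts))
    return hosts


# validate_hosts(['host-1', 'host-2'])  -> returns the list unchanged
# validate_hosts('invalid data')        -> raises NonRecoverableError
```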
+ +# Third party imports +import mock +import openstack.compute.v2.keypair + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.compute import keypair +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + KEYPAIR_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class KeyPairTestCase(OpenStackTestBase): + + def setUp(self): + super(KeyPairTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_key_pair', + 'description': 'key_pair_description' + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='KeyPairTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + keypair_instance = openstack.compute.v2.keypair.Keypair(**{ + 'id': 'test_key_pair', + 'name': 'test_key_pair', + 'fingerprint': 'test_fingerprint', + 'public_key': 'test_public_key', + 'private_key': 'test_private_key', + + }) + # Mock keypair response + mock_connection().compute.create_keypair = \ + mock.MagicMock(return_value=keypair_instance) + # Call create keypair + keypair.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'test_key_pair') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_key_pair') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + KEYPAIR_OPENSTACK_TYPE) + + self.assertEqual( + self._ctx.instance.runtime_properties['public_key'], + 'test_public_key') + + self.assertEqual( + self._ctx.instance.runtime_properties['private_key'], + 'test_private_key') + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='KeyPairTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + keypair_instance = openstack.compute.v2.keypair.Keypair(**{ + 'id': 'test_key_pair', + 'name': 'test_key_pair', + 'fingerprint': 'test_fingerprint', + 'public_key': 'test_public_key', + 'private_key': 'test_private_key', + + }) + # Mock delete keypair response + mock_connection().compute.delete_keypair = \ + mock.MagicMock(return_value=None) + # Mock get keypair + mock_connection().compute.get_keypair = \ + mock.MagicMock(return_value=keypair_instance) + + # Call delete keypair + keypair.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + 'public_key', + 'private_key']: + self.assertNotIn(attr, + self._ctx.instance.runtime_properties) + + def test_list_keypairs(self, mock_connection): + # Prepare the context for list keypairs operation + self._prepare_context_for_operation( + test_name='KeyPairTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + keypair_list = [ + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_1', + 'fingerprint': 'test_fingerprint_1', + 'public_key': 'test_public_key_1' + }), + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_2', + 'fingerprint': 'test_fingerprint_2', + 'public_key': 'test_public_key_2' + }), + ] + # Mock list keypairs + mock_connection().compute.keypairs = \ + mock.MagicMock(return_value=keypair_list) + + # Call list keypair + keypair.list_keypairs() + + # Check if the keypairs list saved as runtime properties + self.assertIn( + 'key_pair_list', + self._ctx.instance.runtime_properties) + + # Check the size of keypairs list + 
self.assertEqual( + len(self._ctx.instance.runtime_properties['key_pair_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation keypairs operation + self._prepare_context_for_operation( + test_name='KeyPairTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + keypair_list = [ + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_1', + 'fingerprint': 'test_fingerprint_1', + 'public_key': 'test_public_key_1' + }), + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_2', + 'fingerprint': 'test_fingerprint_2', + 'public_key': 'test_public_key_2' + }), + ] + # Mock list keypairs + mock_connection().compute.keypairs = \ + mock.MagicMock(return_value=keypair_list) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + keypair.creation_validation() diff --git a/openstack_plugin/tests/compute/test_server.py b/openstack_plugin/tests/compute/test_server.py new file mode 100644 index 00000000..4def69b2 --- /dev/null +++ b/openstack_plugin/tests/compute/test_server.py @@ -0,0 +1,1826 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
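The keypair `test_creation_validation` above, and the server tests that follow, stack method-level patches (for example `_get_user_password` and `_set_server_ips_runtime_properties`) on top of the class-level `openstack.connect` patch, and they feed `get_server` a different status on every call via `side_effect` lists. Two details are easy to trip over: patch decorators inject their mocks bottom-up, so the decorator closest to the method supplies the first argument and the class-level patch the last one (hence signatures like `(self, mock_quota_sets, mock_connection)`), and a `side_effect` list is consumed one item per call. A standalone sketch of both behaviours, using `os` functions purely as stand-ins rather than plugin code:

```python
# Sketch of patch-argument ordering and side_effect sequencing (stand-in targets).
import os
import unittest

import mock  # `from unittest import mock` on Python 3


@mock.patch('os.getcwd')                   # class-level patch -> injected last
class PatchOrderDemo(unittest.TestCase):

    @mock.patch('os.listdir')              # method-level patch -> injected first
    def test_order_and_side_effect(self, mock_listdir, mock_getcwd):
        mock_getcwd.return_value = '/fake/cwd'
        # Each call returns the next item, letting one test walk a resource
        # through successive states (e.g. ACTIVE -> SHUTOFF in the server tests).
        mock_listdir.side_effect = [['first-call'], ['second-call']]

        self.assertEqual(os.getcwd(), '/fake/cwd')
        self.assertEqual(os.listdir('.'), ['first-call'])
        self.assertEqual(os.listdir('.'), ['second-call'])
        # A third os.listdir() call would raise StopIteration, so the list
        # must match the number of calls the code under test makes.


if __name__ == '__main__':
    unittest.main()
```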
+ +# Third party imports +import mock +import openstack.compute.v2.server +import openstack.compute.v2.server_interface +import openstack.compute.v2.volume_attachment +import openstack.compute.v2.keypair +import openstack.image.v2.image +import openstack.exceptions +from cloudify.state import current_ctx +from cloudify.exceptions import (OperationRetry, NonRecoverableError) +from cloudify.mocks import ( + MockContext, + MockNodeContext, + MockNodeInstanceContext, +) + + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.compute import server +from openstack_plugin.utils import (get_snapshot_name, + generate_attachment_volume_key) +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + FLOATING_IP_OPENSTACK_TYPE, + SECURITY_GROUP_OPENSTACK_TYPE, + VOLUME_OPENSTACK_TYPE, + SERVER_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + PORT_OPENSTACK_TYPE, + KEYPAIR_OPENSTACK_TYPE, + SERVER_GROUP_OPENSTACK_TYPE, + NETWORK_NODE_TYPE, + PORT_NODE_TYPE, + KEYPAIR_NODE_TYPE, + VOLUME_NODE_TYPE, + SERVER_GROUP_NODE_TYPE, + SERVER_TASK_DELETE, + SERVER_TASK_START, + SERVER_TASK_STOP, + SERVER_INTERFACE_IDS, + SERVER_TASK_BACKUP_DONE, + SERVER_TASK_RESTORE_STATE, + VOLUME_ATTACHMENT_TASK, + VOLUME_DETACHMENT_TASK, + VOLUME_ATTACHMENT_ID, + SERVER_ACTION_STATUS_DONE, + SERVER_ACTION_STATUS_PENDING) + + +@mock.patch('openstack.connect') +class ServerTestCase(OpenStackTestBase): + + def setUp(self): + super(ServerTestCase, self).setUp() + self.type_hierarchy = ['cloudify.nodes.Root', 'cloudify.nodes.Compute'] + + def _pepare_relationship_context_for_operation(self, + deployment_id, + source, + target, + node_id=None): + + self._ctx = self.get_mock_relationship_ctx( + node_id=node_id, + deployment_name=deployment_id, + test_source=source, + test_target=target) + current_ctx.set(self._ctx) + + @property + def node_properties(self): + properties = super(ServerTestCase, self).node_properties + properties['os_family'] = 'Linux' + properties['device_name'] = 'test-device' + return properties + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + { + 'node': { + 'id': 'port-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-port', + } + } + }, + 'instance': { + 'id': 'port-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe2', + OPENSTACK_TYPE_PROPERTY: PORT_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-port' + } + }, + 'type': PORT_NODE_TYPE, + }, + { + 'node': { + 'id': 'volume-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-volume', + } + } + }, + 'instance': { + 'id': 'volume-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe1', + OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-volume' + } + }, + 'type': VOLUME_NODE_TYPE, + }, + { + 'node': { + 'id': 'keypair-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': 
{ + 'name': 'test-keypair', + } + } + }, + 'instance': { + 'id': 'keypair-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe0', + OPENSTACK_TYPE_PROPERTY: KEYPAIR_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-keypair' + } + }, + 'type': KEYPAIR_NODE_TYPE, + }, + { + 'node': { + 'id': 'server-group-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-server-group', + } + } + }, + 'instance': { + 'id': 'server-group-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe9', + OPENSTACK_TYPE_PROPERTY: SERVER_GROUP_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-server-group' + } + }, + 'type': SERVER_GROUP_NODE_TYPE, + 'type_hierarchy': [SERVER_GROUP_NODE_TYPE, + 'cloudify.nodes.Root'] + } + ] + server_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + type_hierarchy=self.type_hierarchy, + test_relationships=server_rels) + + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + mock_connection().compute.create_server = \ + mock.MagicMock(return_value=server_instance) + server.create() + + # Check if the resource id is already set or not + self.assertIn( + RESOURCE_ID, + self._ctx.instance.runtime_properties) + + # Check if the server payload is assigned for the created server + self.assertIn( + SERVER_OPENSTACK_TYPE, + self._ctx.instance.runtime_properties) + + def test_create_external_resource(self, mock_connection): + properties = dict() + # Enable external resource + properties['use_external_resource'] = True + + # Add node properties config to this dict + properties.update(self.node_properties) + # Reset resource config since we are going to use external resource + # and do not care about the resource config data + properties['resource_config'] = {} + + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + { + 'node': { + 'id': 'port-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-port', + } + } + }, + 'instance': { + 'id': 'port-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe2', + OPENSTACK_TYPE_PROPERTY: PORT_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-port' + } + }, + 'type': PORT_NODE_TYPE, + }, + { + 'node': { + 'id': 'keypair-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-keypair', + } + } + }, + 'instance': { + 'id': 'keypair-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe0', + OPENSTACK_TYPE_PROPERTY: KEYPAIR_OPENSTACK_TYPE, + 
OPENSTACK_NAME_PROPERTY: 'test-keypair', + 'use_external_resource': True, + } + }, + 'type': KEYPAIR_NODE_TYPE, + } + ] + server_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + type_hierarchy=self.type_hierarchy, + test_properties=properties, + test_relationships=server_rels, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + old_server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': { + 'network-1': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '10.1.0.1', + 'version': 4 + } + ] + }, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test-keypair', + 'networks': [ + { + 'port_id': 'b95b5509-c122-4c2f-823e-884bb559afe2' + }, + { + 'uuid': 'e95b5509-c122-4c2f-823e-884bb559afe1' + } + ] + }) + + updated_server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '10.1.0.1', + 'access_ipv6': '', + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test-keypair', + 'addresses': { + 'network-1': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '10.1.0.1', + 'version': 4 + } + ], + 'network-2': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '10.2.0.1', + 'version': 4 + } + ] + }, + 'networks': [ + { + 'port_id': 'b95b5509-c122-4c2f-823e-884bb559afe2' + }, + { + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559afe2' + }, + { + 'uuid': 'e95b5509-c122-4c2f-823e-884bb559afe1' + }, + { + 'uuid': 'a95b5509-c122-4c2f-823e-884bb559afe4' + } + ] + }) + + server_interface = \ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afb1', + 'net_id': 'a95b5509-c122-4c2f-823e-884bb559cfb2', + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559afb3', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + }) + + port_interface = \ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afa5', + 'net_id': 'a95b5509-c122-4c2f-823e-884bb559cfs3', + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + }) + + network_interface = \ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afa7', + 'net_id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559eae3', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + }) + + keypair_instance = openstack.compute.v2.keypair.Keypair(**{ + 'id': 'test-keypair', + 'name': 'test-keypair', + 'fingerprint': 'test_fingerprint', + + }) + + # Mock keypair response gor get + mock_connection().compute.get_keypair = \ + mock.MagicMock(return_value=keypair_instance) + + # Mock get operation in two places + # First one will be when get the server for the first time + # Second one will be when we update the server with all interfaces + # Third one will be when set the runtime properties + mock_connection().compute.get_server = \ + mock.MagicMock(side_effect=[old_server_instance, + updated_server_instance, + 
updated_server_instance]) + + # Mock create server interface operation + # Create server interface will be called in multiple places + # The first one when adding interface from port node + # The second one will be when adding interface from network node + mock_connection().compute.create_server_interface = \ + mock.MagicMock(side_effect=[port_interface, network_interface]) + + # Mock list server interface operation + # The first one is when check the attached interface to external server + # The second one will contains the attached interface + the port + # interface added + mock_connection().compute.server_interfaces = \ + mock.MagicMock(side_effect=[[server_interface], + [server_interface, port_interface]]) + + server.create() + + # Check if the resource id is already set or not + self.assertEqual( + 'a95b5509-c122-4c2f-823e-884bb559afe8', + self._ctx.instance.runtime_properties[RESOURCE_ID]) + + # Check if the server payload is assigned for the created server + self.assertEqual( + SERVER_OPENSTACK_TYPE, + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY]) + + for interface_id in ['a95b5509-c122-4c2f-823e-884bb559afa5', + 'a95b5509-c122-4c2f-823e-884bb559afa7']: + self.assertTrue( + interface_id in + self._ctx.instance.runtime_properties[SERVER_INTERFACE_IDS]) + + self.assertEqual( + '10.1.0.1', + self._ctx.instance.runtime_properties['access_ipv4']) + + self.assertTrue(self._ctx.instance.runtime_properties['ip'] + in ['10.1.0.1', '10.2.0.1']) + + self.assertEqual( + 2, + len(self._ctx.instance.runtime_properties['ipv4_addresses'])) + + @mock.patch('openstack_plugin.resources.compute.server' + '._get_user_password') + @mock.patch('openstack_plugin.resources.compute.server' + '._set_server_ips_runtime_properties') + def test_configure(self, + mock_ips_runtime_properties, + mock_user_password, + mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure', + type_hierarchy=self.type_hierarchy) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE' + + }) + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + server.configure() + mock_ips_runtime_properties.assert_called() + mock_user_password.assert_called() + + @mock.patch('openstack_plugin.resources.compute.server' + '._get_user_password') + @mock.patch('openstack_plugin.resources.compute.server' + '._set_server_ips_runtime_properties') + def test_configure_with_retry(self, + mock_ips_runtime_properties, + mock_user_password, + mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure', + type_hierarchy=self.type_hierarchy) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 
'key_name': 'test_key_name', + 'status': 'UNKNOWN' + + }) + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + with self.assertRaises(OperationRetry): + server.configure() + mock_ips_runtime_properties.assert_not_called() + mock_user_password.assert_not_called() + + @mock.patch('openstack_plugin.resources.compute.server' + '._get_user_password') + @mock.patch('openstack_plugin.resources.compute.server' + '._set_server_ips_runtime_properties') + def test_configure_with_error(self, + mock_ips_runtime_properties, + mock_user_password, + mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure', + type_hierarchy=self.type_hierarchy) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ERROR' + + }) + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + with self.assertRaises(NonRecoverableError): + server.configure() + mock_ips_runtime_properties.assert_not_called() + mock_user_password.assert_not_called() + + def test_stop(self, mock_connection): + # Prepare the context for stop operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.stop', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + stopped_server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'SHUTOFF', + + }) + server_interfaces = [ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afa8', + 'net_id': 'a95b5509-c122-4c2f-823e-884bb559cfe8', + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559efe8', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + }), + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afa7', + 'net_id': 'a95b5509-c122-4c2f-823e-884bb559cae8', + 'port_id': 'a95b5509-c122-4c2f-823e-884bb559eae8', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + }) + ] + + # Mock stop operation + mock_connection().compute.stop_server = \ + mock.MagicMock(return_value=None) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(side_effect=[server_instance, + stopped_server_instance, + stopped_server_instance]) + + # Mock get server interfaces operation + 
mock_connection().compute.server_interfaces = \ + mock.MagicMock(return_value=server_interfaces) + + # Mock get server interfaces operation + mock_connection().compute.delete_server_interface = \ + mock.MagicMock(return_value=None) + + # Stop the server + server.stop() + + # Check if the resource id is already set or not + self.assertIn( + SERVER_TASK_STOP, + self._ctx.instance.runtime_properties) + + @mock.patch('openstack_sdk.resources.compute' + '.OpenstackServer.delete_server_interface') + def test_stop_external_resource(self, + mock_delete_server_interface, + mock_connection): + + properties = dict() + # Enable external resource + properties['use_external_resource'] = True + + # Add node properties config to this dict + properties.update(self.node_properties) + # Reset resource config since we are going to use external resource + # and do not care about the resource config data + properties['resource_config'] = {} + + # Prepare the context for stop operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.stop', + type_hierarchy=self.type_hierarchy, + test_properties=properties, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + SERVER_INTERFACE_IDS: [ + 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'a95b5509-c122-4c2f-823e-884bb559af21' + ] + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + # Stop the server + server.stop() + + self.assertEqual(mock_delete_server_interface.call_count, 2) + + def test_reboot(self, mock_connection): + # Prepare the context for reboot operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.reboot', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + rebooted_server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'REBOOT', + + }) + + # Mock stop operation + mock_connection().compute.reboot_server = \ + mock.MagicMock(return_value=None) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(side_effect=[server_instance, + rebooted_server_instance]) + + self._ctx.operation.retry = mock.Mock(side_effect=OperationRetry()) + + with self.assertRaises(OperationRetry): + # Reboot the server + 
server.reboot() + self._ctx.operation.retry.assert_called_with( + message='Server has REBOOT state. Waiting.', retry_after=30) + + def test_suspend(self, mock_connection): + # Prepare the context for suspend operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.freeze.suspend', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Mock suspend operation + mock_connection().compute.suspend_server = \ + mock.MagicMock(return_value=None) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Call suspend + server.suspend() + + def test_resume(self, mock_connection): + # Prepare the context for resume operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.freeze.resume', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Mock resume operation + mock_connection().compute.resume_server = \ + mock.MagicMock(return_value=None) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Call resume + server.resume() + + def test_create_snapshot(self, mock_connection): + # Prepare the context for snapshot create operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Mock backup operation + mock_connection().compute.backup = \ + mock.MagicMock(return_value=None) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + mock_connection().image.images = \ + mock.MagicMock(return_value=[]) + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': False + } + server.snapshot_create(**snapshot_params) + + # Check if the resource id is already set or not + self.assertIn( + SERVER_TASK_BACKUP_DONE, + self._ctx.instance.runtime_properties) + + def 
test_create_backup(self, mock_connection): + # Prepare the context for backup create operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Mock backup operation + mock_connection().compute.create_image = \ + mock.MagicMock(return_value=None) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + mock_connection().image.images = \ + mock.MagicMock(return_value=[]) + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': True, + 'snapshot_rotation': 2, + 'snapshot_type': 'Daily' + } + server.snapshot_create(**snapshot_params) + + # Check if the resource id is already set or not + self.assertIn( + SERVER_TASK_BACKUP_DONE, + self._ctx.instance.runtime_properties) + + def test_apply_snapshot(self, mock_connection): + # Prepare the context for snapshot apply operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.apply', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + # Generate the snapshot name for the mocked image + snapshot_name = get_snapshot_name('vm', 'test-snapshot', False) + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': snapshot_name, + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + + }) + + # Mock backup operation + mock_connection().compute.backup = \ + mock.MagicMock(return_value=None) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + mock_connection().image.images = \ + mock.MagicMock(return_value=[image]) + + # Set runtime properties for apply snapshot + self._ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_ACTION_STATUS_PENDING + self._ctx.instance.runtime_properties[SERVER_TASK_STOP] = \ + SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_START] = \ + SERVER_ACTION_STATUS_DONE + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': False + } + server.snapshot_apply(**snapshot_params) + + def test_apply_backup(self, mock_connection): + # Prepare the context for backup apply operation + self._prepare_context_for_operation( + 
test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.apply', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Generate the snapshot name for the mocked image + snapshot_name = get_snapshot_name('vm', 'test-snapshot', True) + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': snapshot_name, + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + + }) + + # Mock backup operation + mock_connection().compute.backup = \ + mock.MagicMock(return_value=None) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + mock_connection().image.images = \ + mock.MagicMock(return_value=[image]) + + # Set runtime properties for apply snapshot + self._ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_ACTION_STATUS_PENDING + self._ctx.instance.runtime_properties[SERVER_TASK_STOP] = \ + SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_START] = \ + SERVER_ACTION_STATUS_DONE + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': True + } + server.snapshot_apply(**snapshot_params) + + def test_delete_snapshot(self, mock_connection): + # Prepare the context for snapshot delete operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.delete', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Set runtime properties for snapshot + self._ctx.instance.runtime_properties[SERVER_TASK_BACKUP_DONE]\ + = SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_STOP] = \ + SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_START] = \ + SERVER_ACTION_STATUS_DONE + + # Generate the snapshot name for the mocked image + snapshot_name = get_snapshot_name('vm', 'test-snapshot', False) + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': snapshot_name, + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + + }) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + 
mock_connection().image.images = \ + mock.MagicMock(side_effect=[[image], []]) + + # Mock list image operation + mock_connection().image.delete_image = \ + mock.MagicMock(return_value=None) + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': False + } + server.snapshot_delete(**snapshot_params) + + for attr in [SERVER_TASK_RESTORE_STATE, + SERVER_ACTION_STATUS_DONE, + SERVER_TASK_STOP, + SERVER_TASK_START]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_delete_backup(self, mock_connection): + # Prepare the context for snapshot delete backup + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.delete', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + # Set runtime properties for snapshot + self._ctx.instance.runtime_properties[SERVER_TASK_BACKUP_DONE]\ + = SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_RESTORE_STATE]\ + = SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_STOP] = \ + SERVER_ACTION_STATUS_DONE + self._ctx.instance.runtime_properties[SERVER_TASK_START] = \ + SERVER_ACTION_STATUS_DONE + + # Generate the snapshot name for the mocked image + snapshot_name = get_snapshot_name('vm', 'test-snapshot', True) + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': snapshot_name, + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + + }) + + # Mock get server operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Mock list image operation + mock_connection().image.images = \ + mock.MagicMock(side_effect=[[image], []]) + + # Mock list image operation + mock_connection().image.delete_image = \ + mock.MagicMock(return_value=None) + + # Call snapshot + snapshot_params = { + 'snapshot_name': 'test-snapshot', + 'snapshot_incremental': True + } + server.snapshot_delete(**snapshot_params) + + for attr in [SERVER_TASK_RESTORE_STATE, + SERVER_TASK_BACKUP_DONE, + SERVER_TASK_STOP, + SERVER_TASK_START]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + @mock.patch( + 'openstack_plugin.resources.compute.server.wait_until_status') + def test_attach_volume(self, mock_wait_status, mock_connection): + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: '1', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='volume-1', + runtime_properties={ + RESOURCE_ID: '1', + OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-volume', + }), + 'node': 
MockNodeContext( + id='2', + properties={ + 'device_name': 'test', + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '2' + }}) + + volume_attachment = \ + openstack.compute.v2.volume_attachment.VolumeAttachment(**{ + 'id': '1', + 'server_id': '1', + 'volume_id': '3', + 'attachment_id': '4', + }) + + attachment_task_key = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_TASK, + 'volume-1', 'server-1') + + attachment_task_id = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_ID, + 'volume-1', 'server-1') + + mock_wait_status.return_value = volume_attachment + + # Mock list image operation + mock_connection().compute.create_volume_attachment = \ + mock.MagicMock(return_value=volume_attachment) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + server.attach_volume() + + # Check if the resource id is already set or not + self.assertIn( + attachment_task_id, + self._ctx.target.instance.runtime_properties) + + self.assertNotIn( + attachment_task_key, + self._ctx.target.instance.runtime_properties) + + @mock.patch( + 'openstack_plugin.resources.compute.server.wait_until_status') + def test_detach_volume(self, mock_wait_status, mock_connection): + attachment_task_id = \ + generate_attachment_volume_key(VOLUME_ATTACHMENT_ID, + 'volume-1', 'server-1') + + detachment_task_key = \ + generate_attachment_volume_key(VOLUME_DETACHMENT_TASK, + 'volume-1', 'server-1') + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + attachment_task_id: '1' + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='volume-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe7', + OPENSTACK_TYPE_PROPERTY: VOLUME_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-volume', + }), + 'node': MockNodeContext( + id='2', + properties={ + 'device_name': 'test', + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '2' + }}) + + volume_attachment = \ + openstack.compute.v2.volume_attachment.VolumeAttachment(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe6', + 'server_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'volume_id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'attachment_id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + }) + + mock_wait_status.return_value = volume_attachment + + # Mock list image operation + mock_connection().compute.delete_volume_attachment = \ + mock.MagicMock(return_value=None) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + server.detach_volume() + + self.assertNotIn( + detachment_task_key, + self._ctx.target.instance.runtime_properties) + + def test_connect_floating_ip(self, mock_connection): + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='floating-ip-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe2', + OPENSTACK_TYPE_PROPERTY: FLOATING_IP_OPENSTACK_TYPE, + 
OPENSTACK_NAME_PROPERTY: 'node-floating-ip', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + # Mock list image operation + mock_connection().compute.add_floating_ip_to_server = \ + mock.MagicMock(return_value=None) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + server.connect_floating_ip(floating_ip='10.2.3.4') + + def test_disconnect_floating_ip(self, mock_connection): + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='floating-ip-1', + runtime_properties={ + RESOURCE_ID: '10.2.3.4', + OPENSTACK_TYPE_PROPERTY: FLOATING_IP_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-floating-ip', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + # Mock list image operation + mock_connection().compute.remove_floating_ip_from_server = \ + mock.MagicMock(return_value=None) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + server.disconnect_floating_ip(floating_ip='10.2.3.4') + + def test_connect_security_group(self, mock_connection): + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='security-group-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe7', + OPENSTACK_TYPE_PROPERTY: SECURITY_GROUP_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-security-group', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + # Mock list image operation + mock_connection().compute.add_security_group_to_server = \ + mock.MagicMock(return_value=None) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + 
server.connect_security_group(security_group_id='1') + + @mock.patch( + 'openstack_plugin.resources.compute.' + 'server._disconnect_security_group_from_server_ports') + def test_disconnect_security_group(self, + mock_clean_ports, + mock_connection): + target = MockContext({ + 'instance': MockNodeInstanceContext( + id='security-group-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe7', + OPENSTACK_TYPE_PROPERTY: SECURITY_GROUP_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-security-group', + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + source = MockContext({ + 'instance': MockNodeInstanceContext( + id='server-1', + runtime_properties={ + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: SERVER_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'node-server', + 'server': { + 'name': 'test', + 'security_groups': [ + { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe5' + }, + { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + } + ] + } + }), + 'node': MockNodeContext( + id='1', + properties={ + 'client_config': self.client_config, + 'resource_config': self.resource_config + } + ), '_context': { + 'node_id': '1' + }}) + + # Mock list image operation + mock_connection().compute.remove_security_group_from_server = \ + mock.MagicMock(return_value=None) + + self._pepare_relationship_context_for_operation( + deployment_id='ServerTest', + source=source, + target=target, + node_id='1') + + # Call trigger attach volume + server.disconnect_security_group( + security_group_id='a95b5509-c122-4c2f-823e-884bb559afe7') + mock_clean_ports.assert_called() + + def test_delete_with_retry(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + server_instance = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + # Mock delete operation + mock_connection().compute.delete_server = \ + mock.MagicMock(return_value=None) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=server_instance) + + # Call delete server operation + with self.assertRaises(OperationRetry): + server.delete() + # Check if the resource id is already set or not + self.assertIn( + SERVER_TASK_DELETE, + self._ctx.instance.runtime_properties) + + def test_delete_with_success(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + SERVER_TASK_DELETE: True, + }) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(side_effect=openstack.exceptions.ResourceNotFound) + + server.delete() + + def test_delete_with_error(self, mock_connection): + # 
Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + # Mock get operation + mock_connection().compute.get_server = \ + mock.MagicMock(side_effect=openstack.exceptions.ResourceNotFound) + + with self.assertRaises(NonRecoverableError): + server.delete() + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.operations.update', + type_hierarchy=self.type_hierarchy, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + old_server = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }) + + new_config = { + 'name': 'update_test_server', + } + + new_server = openstack.compute.v2.server.Server(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'update_test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }) + mock_connection().compute.get_server = \ + mock.MagicMock(return_value=old_server) + mock_connection().compute.update_server = \ + mock.MagicMock(return_value=new_server) + + server.update(args=new_config) + + # Check if the server payload is assigned for the created server + self.assertIn( + SERVER_OPENSTACK_TYPE, + self._ctx.instance.runtime_properties) + + # Compare old name value against updated name + self.assertNotEqual( + self._ctx.instance.runtime_properties[SERVER_OPENSTACK_TYPE][ + 'name'], old_server.name) + + def test_list_servers(self, mock_connection): + # Prepare the context for list servers operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.operations.list', + type_hierarchy=self.type_hierarchy) + server_list = [ + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_1', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_server_2', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + ] + + mock_connection().compute.servers = \ + mock.MagicMock(return_value=server_list) + + # Call list servers + server.list_servers() + + # Check if the server list saved as runtime properties + self.assertIn( + 'server_list', + self._ctx.instance.runtime_properties) + + # Check the size of server list + self.assertEqual( + 
len(self._ctx.instance.runtime_properties['server_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation servers operation + self._prepare_context_for_operation( + test_name='ServerTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation', + type_hierarchy=self.type_hierarchy) + server_list = [ + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_1', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_server_2', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + ] + + # Mock the server list API + mock_connection().compute.servers = \ + mock.MagicMock(return_value=server_list) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + server.creation_validation() diff --git a/openstack_plugin/tests/compute/test_server_group.py b/openstack_plugin/tests/compute/test_server_group.py new file mode 100644 index 00000000..dde35ce7 --- /dev/null +++ b/openstack_plugin/tests/compute/test_server_group.py @@ -0,0 +1,189 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Third party imports +import mock +import openstack.compute.v2.server_group +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.compute import server_group +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + SERVER_GROUP_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class ServerGroupTestCase(OpenStackTestBase): + + def setUp(self): + super(ServerGroupTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_server_group', + 'description': 'server_group_description' + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='ServerGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + server_instance = openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + + }) + # Mock create server group response + mock_connection().compute.create_server_group = \ + mock.MagicMock(return_value=server_instance) + + # Call create server group + server_group.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_server_group') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + SERVER_GROUP_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='ServerGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + server_instance = openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + + }) + # Mock delete server group response + mock_connection().compute.delete_server_group = \ + mock.MagicMock(return_value=None) + + # Mock delete server group response + mock_connection().compute.get_server_group = \ + mock.MagicMock(return_value=server_instance) + + # Call delete server group + server_group.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, + self._ctx.instance.runtime_properties) + + def test_update(self, _): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='ServerGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.update') + + updated_config = { + 'name': 'Updated Name' + } + + with self.assertRaises(NonRecoverableError): + # Call update server group + server_group.update(args=updated_config) + + def test_list_server_groups(self, mock_connection): + # Prepare the context for list server groups operation + self._prepare_context_for_operation( + test_name='ServerGroupTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + server_group_list = [ + openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_group_1', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + 
openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_server_group_2', + 'members': ['server2', 'server3'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + ] + # Mock list keypairs + mock_connection().compute.server_groups = \ + mock.MagicMock(return_value=server_group_list) + + # Call list server groups + server_group.list_server_groups() + + # Check if the server groups list saved as runtime properties + self.assertIn( + 'server_group_list', + self._ctx.instance.runtime_properties) + + # Check the size of server groups list + self.assertEqual( + len(self._ctx.instance.runtime_properties['server_group_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation server groups operation + self._prepare_context_for_operation( + test_name='ServerGroupTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + server_group_list = [ + openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_group_1', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_server_group_2', + 'members': ['server2', 'server3'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + ] + # Mock list server groups + mock_connection().compute.server_groups = \ + mock.MagicMock(return_value=server_group_list) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + server_group.creation_validation() diff --git a/openstack_plugin/tests/identity/__init__.py b/openstack_plugin/tests/identity/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/tests/identity/test_project.py b/openstack_plugin/tests/identity/test_project.py new file mode 100644 index 00000000..4af44389 --- /dev/null +++ b/openstack_plugin/tests/identity/test_project.py @@ -0,0 +1,440 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Third party imports +import mock +import openstack.identity.v3.project +import openstack.identity.v2.user +import openstack.identity.v2.role +from cloudify.exceptions import NonRecoverableError + +# Local imports +from openstack_sdk.resources.identity import OpenstackProject +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.identity import project +from openstack_plugin.constants import (RESOURCE_ID, + IDENTITY_USERS, + IDENTITY_QUOTA, + IDENTITY_ROLES, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + PROJECT_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class ProjectTestCase(OpenStackTestBase): + + def setUp(self): + super(ProjectTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_project', + 'description': 'project_description' + } + + @property + def users(self): + return [ + { + 'name': 'user-1', + IDENTITY_ROLES: [ + 'test-role-1', + 'test-role-2', + 'test-role-3' + ] + } + ] + + @property + def node_properties(self): + properties = super(ProjectTestCase, self).node_properties + properties[IDENTITY_USERS] = self.users + return properties + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + # Mock create project response + mock_connection().identity.create_project = \ + mock.MagicMock(return_value=project_instance) + + # Call create project + project.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_project') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + PROJECT_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + # Mock delete project response + mock_connection().identity.get_project = \ + mock.MagicMock(return_value=project_instance) + + # Mock delete project response + mock_connection().identity.delete_project = \ + mock.MagicMock(return_value=None) + + # Call delete project + project.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.operations.update_project') + + old_project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 
'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + + new_config = { + 'name': 'update_project', + } + + new_project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + + # Mock get project response + mock_connection().identity.get_project = \ + mock.MagicMock(return_value=old_project_instance) + + # Mock update project response + mock_connection().identity.update_project = \ + mock.MagicMock(return_value=new_project_instance) + + # Call update project + project.update(args=new_config) + + def test_get_quota(self, _): + # Prepare the context for get quota operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.operations.get_quota') + + with self.assertRaises(NonRecoverableError): + project.get_project_quota() + + def test_update_project_quota(self, _): + # Prepare the context for update quota operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.operations.update_quota') + + with self.assertRaises(NonRecoverableError): + project.update_project_quota() + + def test_list_projects(self, mock_connection): + # Prepare the context for list projects operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + projects = [ + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project_1', + 'description': 'Testing Project 1', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_project_2', + 'description': 'Testing Project 2', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + ] + + # Mock list project response + mock_connection().identity.projects = \ + mock.MagicMock(return_value=projects) + + # Call list projects + project.list_projects() + + # Check if the projects list saved as runtime properties + self.assertIn( + 'project_list', + self._ctx.instance.runtime_properties) + + # Check the size of project list + self.assertEqual( + len(self._ctx.instance.runtime_properties['project_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation projects operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + projects = [ + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project_1', + 'description': 'Testing Project 1', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 
'test_project_2', + 'description': 'Testing Project 2', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + ] + + # Mock list project response + mock_connection().identity.projects = \ + mock.MagicMock(return_value=projects) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + project.creation_validation() + + @mock.patch( + 'openstack_plugin.resources.identity.project._assign_users') + @mock.patch( + 'openstack_plugin.resources.identity.project._validate_users') + def test_start(self, mock_validate, mock_assign, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start') + + project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + # Mock get project response + mock_connection().identity.get_project = \ + mock.MagicMock(return_value=project_instance) + + # Call start project + project.start() + mock_validate.assert_called() + mock_assign.assert_called() + + @mock.patch( + 'openstack_plugin.resources.identity.project._assign_users') + @mock.patch( + 'openstack_plugin.resources.identity.project._validate_users') + def test_invalid_start(self, mock_validate, mock_assign, mock_connection): + # Prepare the context for start operation + properties = dict() + properties[IDENTITY_QUOTA] = {'compute': 22} + properties.update(self.node_properties) + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start', + test_properties=properties) + + project_instance = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + # Mock get project response + mock_connection().identity.get_project = \ + mock.MagicMock(return_value=project_instance) + + with self.assertRaises(NonRecoverableError): + # Call start project + project.start() + mock_validate.assert_called() + mock_assign.assert_called() + + def test_validate_users(self, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start') + + user_instance_1 = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'user-1', + 'email': 'test_email', + 'is_enabled': True + + }) + role_instance_1 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test-role-1', + 'description': 'Testing Role 1', + 'is_enabled': True + }) + role_instance_2 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe6', + 'name': 'test-role-2', + 'description': 'Testing Role 2', + 'domain_id': 'test_domain_id', + 'is_enabled': True + }) + role_instance_3 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe5', + 'name': 'test-role-3', + 'description': 'Testing Role 3', + 'is_enabled': True + }) + 
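# Note on the mocks below: find_role is given a list as side_effect, + # so consecutive calls return role_instance_1, role_instance_2 and + # role_instance_3 in order, one Role for each role name declared in + # self.users. + 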
# Mock find user response + mock_connection().identity.find_user = \ + mock.MagicMock(return_value=user_instance_1) + + # Mock find role response + mock_connection().identity.find_role = \ + mock.MagicMock(side_effect=[role_instance_1, + role_instance_2, + role_instance_3]) + + # Call start project + project._validate_users(self.client_config, self.users) + + def test_assign_users(self, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='ProjectTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start') + + user_instance_1 = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'user-1', + 'email': 'test_email', + 'is_enabled': True + + }) + role_instance_1 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test-role-1', + 'description': 'Testing Role 1', + 'is_enabled': True + }) + role_instance_2 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe6', + 'name': 'test-role-2', + 'description': 'Testing Role 2', + 'domain_id': 'test_domain_id', + 'is_enabled': True + }) + role_instance_3 = openstack.identity.v2.role.Role(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe5', + 'name': 'test-role-3', + 'description': 'Testing Role 3', + 'is_enabled': True + }) + # Mock find user response + mock_connection().identity.find_user = \ + mock.MagicMock(return_value=user_instance_1) + + # Mock find role response + mock_connection().identity.find_role = \ + mock.MagicMock(side_effect=[role_instance_1, + role_instance_2, + role_instance_3]) + + project_instance = OpenstackProject(client_config=self.client_config) + project_instance.resource_id = 'a95b5509-c122-4c2f-823e-884bb559afe9' + + # Call start project + project._assign_users(project_instance, self.users) diff --git a/openstack_plugin/tests/identity/test_user.py b/openstack_plugin/tests/identity/test_user.py new file mode 100644 index 00000000..7f19632f --- /dev/null +++ b/openstack_plugin/tests/identity/test_user.py @@ -0,0 +1,176 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Third party imports +import mock +import openstack.identity.v2.user + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.identity import user +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + USER_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class UserTestCase(OpenStackTestBase): + + def setUp(self): + super(UserTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_user', + 'description': 'user_description', + 'is_enabled': True, + 'email': 'test_email@test.com' + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='UserTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + user_instance = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + # Mock create user response + mock_connection().identity.create_user = \ + mock.MagicMock(return_value=user_instance) + + # Call create user + user.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_user') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + USER_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='UserTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + user_instance = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + # Mock delete user response + mock_connection().identity.delete_user = \ + mock.MagicMock(return_value=user_instance) + + # Mock get user response + mock_connection().identity.get_user = \ + mock.MagicMock(return_value=user_instance) + + # Call delete user + user.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='UserTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_user_instance = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + + new_config = { + 'name': 'test_updated_user', + } + + new_user_instance = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + + # Mock get user response + mock_connection().identity.get_user = \ + mock.MagicMock(return_value=old_user_instance) + + # Mock update user response + mock_connection().identity.update_user = \ + mock.MagicMock(return_value=new_user_instance) + + # Call update user + user.update(args=new_config) + + def test_list_users(self, mock_connection): + # Prepare the context for list users operation + self._prepare_context_for_operation( + test_name='UserTestCase', + 
ctx_operation_name='cloudify.interfaces.operations.list') + + users = [ + openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user_1', + 'is_enabled': True, + 'email': 'test1_email@test.com', + }), + openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_user_2', + 'is_enabled': True, + 'email': 'test2_email@test.com', + }), + ] + + # Mock list users response + mock_connection().identity.users = \ + mock.MagicMock(return_value=users) + + # Call list users + user.list_users() + + # Check if the user list saved as runtime properties + self.assertIn( + 'user_list', + self._ctx.instance.runtime_properties) + + # Check the size of user list + self.assertEqual( + len(self._ctx.instance.runtime_properties['user_list']), 2) diff --git a/openstack_plugin/tests/network/__init__.py b/openstack_plugin/tests/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/tests/network/test_floating_ip.py b/openstack_plugin/tests/network/test_floating_ip.py new file mode 100644 index 00000000..7dd02e42 --- /dev/null +++ b/openstack_plugin/tests/network/test_floating_ip.py @@ -0,0 +1,394 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Third party imports +import mock +import openstack.network.v2.floating_ip + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import floating_ip +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + FLOATING_IP_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + PORT_OPENSTACK_TYPE, + SUBNET_OPENSTACK_TYPE, + NETWORK_NODE_TYPE, + PORT_NODE_TYPE, + SUBNET_NODE_TYPE) + + +@mock.patch('openstack.connect') +class FloatingIPTestCase(OpenStackTestBase): + + def setUp(self): + super(FloatingIPTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': '10.0.0.1', + 'description': 'floating_ip_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe8', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + { + 'node': { + 'id': 'port-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-port', + } + } + }, + 'instance': { + 'id': 'port-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe7', + OPENSTACK_TYPE_PROPERTY: PORT_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-port' + } + }, + 'type': PORT_NODE_TYPE, + }, + { + 'node': { + 'id': 'subnet-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-subnet', + } + } + }, + 'instance': { + 'id': 'subnet-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe6', + OPENSTACK_TYPE_PROPERTY: SUBNET_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-subnet' + } + }, + 'type': SUBNET_NODE_TYPE, + } + ] + + floating_ip_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + self._prepare_context_for_operation( + test_name='FloatingIPTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_relationships=floating_ip_rels) + + floating_ip_instance = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + # Mock create floating ip response + mock_connection().network.create_ip = \ + mock.MagicMock(return_value=floating_ip_instance) + + # Call create floating ip + floating_ip.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe4') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + '10.0.0.1') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + FLOATING_IP_OPENSTACK_TYPE) + + self.assertEqual( + self._ctx.instance.runtime_properties['floating_ip_address'], + '10.0.0.1') + + 
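# The class-level @mock.patch('openstack.connect') passes each test a + # mock_connection argument; because a MagicMock memoizes its return + # value, every mock_connection() call yields the same fake connection + # object, so attributes preset on it (for example + # mock_connection().network.get_ip below) are exactly what the plugin + # receives from the patched openstack.connect call. + + 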
def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='FloatingIPTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + floating_ip_instance = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + # Mock delete floating ip response + mock_connection().network.delete_ip = \ + mock.MagicMock(return_value=None) + + # Mock get floating ip response + mock_connection().network.get_ip = \ + mock.MagicMock(return_value=floating_ip_instance) + + # Call delete floating ip + floating_ip.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + 'floating_ip_address']: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='FloatingIPTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_floating_ip_instance = \ + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + }) + + new_config = { + 'port_id': '6', + } + + new_floating_ip_instance = \ + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '6', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + }) + + # Mock get floating ip response + mock_connection().network.get_ip = \ + mock.MagicMock(return_value=old_floating_ip_instance) + + # Mock update floating ip response + mock_connection().network.update_ip = \ + mock.MagicMock(return_value=new_floating_ip_instance) + + # Call update floating ip + floating_ip.update(args=new_config) + + def test_list_floating_ips(self, mock_connection): + # Prepare the context for list floating ips operation + self._prepare_context_for_operation( + test_name='FloatingIPTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + floating_ips = [ + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 
'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + }), + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'description': 'test_description_1', + 'name': '10.0.0.2', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '6', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-08-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['18', '17'] + }), + ] + + # Mock list floating ip response + mock_connection().network.ips = \ + mock.MagicMock(return_value=floating_ips) + + # Call list floating ips + floating_ip.list_floating_ips() + + # Check if the floating ips list saved as runtime properties + self.assertIn( + 'ip_list', + self._ctx.instance.runtime_properties) + + # Check the size of floating ips list + self.assertEqual( + len(self._ctx.instance.runtime_properties['ip_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='FloatingIPTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + floating_ips = [ + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'description': 'test_description', + 'name': '10.0.0.1', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-07-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['15', '16'] + }), + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'description': 'test_description_1', + 'name': '10.0.0.2', + 'created_at': '2016-03-09T12:14:57.233772', + 'fixed_ip_address': '', + 'floating_ip_address': '10.0.0.1', + 'floating_network_id': '3', + 'port_id': '6', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '2016-08-09T12:14:57.233772', + 'subnet_id': '14', + 'tags': ['18', '17'] + }), + ] + + # Mock list floating ip response + mock_connection().network.ips = \ + mock.MagicMock(return_value=floating_ips) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + floating_ip.creation_validation() diff --git a/openstack_plugin/tests/network/test_network.py b/openstack_plugin/tests/network/test_network.py new file mode 100644 index 00000000..c5e8f057 --- /dev/null +++ b/openstack_plugin/tests/network/test_network.py @@ -0,0 +1,389 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. 
All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +import mock +import openstack.network.v2.network + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import network +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + NETWORK_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class NetworkTestCase(OpenStackTestBase): + + def setUp(self): + super(NetworkTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_network', + 'description': 'network_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='NetworkTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + network_instance = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + # Mock create network response + mock_connection().network.create_network = \ + mock.MagicMock(return_value=network_instance) + + # Call create network + network.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe4') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_network') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + NETWORK_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='NetworkTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + network_instance = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 
'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + # Mock delete network response + mock_connection().network.delete_network = \ + mock.MagicMock(return_value=None) + + # Mock get network response + mock_connection().network.get_network = \ + mock.MagicMock(return_value=network_instance) + + # Call delete network + network.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='NetworkTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_network_instance = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + }) + + new_config = { + 'name': 'test_updated_network', + } + + new_network_instance = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_updated_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + + # Mock get network response + mock_connection().network.get_network = \ + mock.MagicMock(return_value=old_network_instance) + + # Mock update network response + mock_connection().network.update_network = \ + mock.MagicMock(return_value=new_network_instance) + + # Call update network + network.update(args=new_config) + + def test_list_networks(self, mock_connection): + # Prepare the context for list projects operation + self._prepare_context_for_operation( + test_name='NetworkTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + networks = [ + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network_1', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 
'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + }), + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 'test_network_2', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + }), + ] + + # Mock list networks response + mock_connection().network.networks = \ + mock.MagicMock(return_value=networks) + + # Call list networks + network.list_networks() + + # Check if the networks list saved as runtime properties + self.assertIn( + 'network_list', + self._ctx.instance.runtime_properties) + + # Check the size of networks list + self.assertEqual( + len(self._ctx.instance.runtime_properties['network_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='NetworkTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + networks = [ + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network_1', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + }), + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 'test_network_2', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 
'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + }), + ] + + # Mock list networks response + mock_connection().network.networks = \ + mock.MagicMock(return_value=networks) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + network.creation_validation() diff --git a/openstack_plugin/tests/network/test_port.py b/openstack_plugin/tests/network/test_port.py new file mode 100644 index 00000000..e1394b92 --- /dev/null +++ b/openstack_plugin/tests/network/test_port.py @@ -0,0 +1,844 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +import mock +import openstack.network.v2.port + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import port +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + PORT_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + SECURITY_GROUP_OPENSTACK_TYPE, + NETWORK_NODE_TYPE, + SECURITY_GROUP_NODE_TYPE) + + +@mock.patch('openstack.connect') +class PortTestCase(OpenStackTestBase): + + def setUp(self): + super(PortTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_port', + 'description': 'port_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + { + 'node': { + 'id': 'security-group-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-security-group', + } + } + }, + 'instance': { + 'id': 'security-group-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe2', + OPENSTACK_TYPE_PROPERTY: SECURITY_GROUP_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-security-group' + } + }, + 'type': SECURITY_GROUP_NODE_TYPE, + } + ] + + port_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_relationships=port_rels) + + port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': 
'7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + # Mock create port response + mock_connection().network.create_port = \ + mock.MagicMock(return_value=port_instance) + + # Call create port + port.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe1') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_port') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + PORT_OPENSTACK_TYPE) + + self.assertEqual( + self._ctx.instance.runtime_properties['fixed_ips'], + [{'10.0.0.1': '10.0.0.2'}]) + + self.assertEqual( + self._ctx.instance.runtime_properties['mac_address'], + '00-14-22-01-23-45') + + self.assertEqual( + self._ctx.instance.runtime_properties['allowed_address_pairs'], + [{'ip_address': '10.0.0.3'}, {'ip_address': '10.0.0.4'}]) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + # Mock delete port response + mock_connection().network.delete_port = \ + mock.MagicMock(return_value=None) + + # Mock get port response + mock_connection().network.get_port = \ + mock.MagicMock(return_value=port_instance) + + # Call delete port + port.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + 'fixed_ips', + 'mac_address', + 'allowed_address_pairs']: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 
'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + new_config = { + 'name': 'test_updated_port', + } + + new_port_instance = \ + openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + # Mock get port response + mock_connection().network.get_port = \ + mock.MagicMock(return_value=old_port_instance) + + # Mock update port response + mock_connection().network.update_port = \ + mock.MagicMock(return_value=new_port_instance) + + # Call update port + port.update(args=new_config) + + def test_create_external_port(self, mock_connection): + # Prepare relationship data which is connected to external port + # resource + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + ] + + port_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + + # Update external port, will be part of create operation when use + # external resource is set to True + properties = dict() + # Enable external resource + properties['use_external_resource'] = True + + # Add node properties config to this dict + properties.update(self.node_properties) + # Reset resource config since we are going to use external resource + # and do not care about the resource config data + properties['resource_config'] = {} + # Set resource id so that we can lookup the external resource + properties['resource_config']['id'] = \ + 'a95b5509-c122-4c2f-823e-884bb559afe1' + + # Set allowed address resource pairs + properties['resource_config']['allowed_address_pairs'] = [ + { + 'ip_address': 
'10.0.0.5' + }, + + { + 'ip_address': '10.0.0.6' + } + ] + + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_properties=properties, + test_relationships=port_rels) + + port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + updated_port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + }, + { + 'ip_address': '10.0.0.5' + }, + { + 'ip_address': '10.0.0.6' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + # Mock get port response + mock_connection().network.get_port = \ + mock.MagicMock(return_value=port_instance) + + # Mock update port response + mock_connection().network.update_port = \ + mock.MagicMock(return_value=updated_port_instance) + + # Call create port + port.create() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + 'fixed_ips', + 'mac_address', + 'allowed_address_pairs']: + self.assertIn(attr, self._ctx.instance.runtime_properties) + + def test_delete_external_port(self, mock_connection): + # Prepare relationship data which is connected to external port + # resource + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + ] + + port_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + + properties = dict() + # Enable external resource + 
properties['use_external_resource'] = True + + # Add node properties config to this dict + properties.update(self.node_properties) + # Reset resource config since we are going to use external resource + # and do not care about the resource config data + properties['resource_config'] = {} + # Set resource id so that we can lookup the external resource + properties['resource_config']['id'] = \ + 'a95b5509-c122-4c2f-823e-884bb559afe1' + + # Set allowed address resource pairs + properties['resource_config']['allowed_address_pairs'] = [ + { + 'ip_address': '10.0.0.5' + }, + + { + 'ip_address': '10.0.0.6' + } + ] + + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete', + test_properties=properties, + test_relationships=port_rels, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1' + }) + + port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + }, + { + 'ip_address': '10.0.0.5' + }, + { + 'ip_address': '10.0.0.6' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + updated_port_instance = openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + + # Mock get port response + mock_connection().network.get_port = \ + mock.MagicMock(return_value=port_instance) + + # Mock update port response + mock_connection().network.update_port = \ + mock.MagicMock(return_value=updated_port_instance) + + # Call delete port + port.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + 'fixed_ips', + 'mac_address', + 'allowed_address_pairs']: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_list_ports(self, mock_connection): + # Prepare the context 
for list ports operation + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + ports = [ + openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port_1', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description_2', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }), + openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'name': 'test_port_1', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description_2', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.3': '10.0.0.4'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-41-23-23-23-24', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }), + ] + + # Mock list port response + mock_connection().network.ports = mock.MagicMock(return_value=ports) + + # Call list ports + port.list_ports() + + # Check if the ports list saved as runtime properties + self.assertIn( + 'port_list', + self._ctx.instance.runtime_properties) + + # Check the size of ports list + self.assertEqual( + len(self._ctx.instance.runtime_properties['port_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='PortTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + ports = [ + openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe1', + 'name': 'test_port_1', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description_2', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 
'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }), + openstack.network.v2.port.Port(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'name': 'test_port_1', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': 'port_description_2', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.3': '10.0.0.4'}], + 'allowed_address_pairs': + [ + { + 'ip_address': '10.0.0.3' + }, + { + 'ip_address': '10.0.0.4' + } + ], + 'mac_address': '00-41-23-23-23-24', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }), + ] + + # Mock list port response + mock_connection().network.ports = mock.MagicMock(return_value=ports) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + port.creation_validation() diff --git a/openstack_plugin/tests/network/test_router.py b/openstack_plugin/tests/network/test_router.py new file mode 100644 index 00000000..2830e460 --- /dev/null +++ b/openstack_plugin/tests/network/test_router.py @@ -0,0 +1,603 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
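Note: every test module added in this diff follows the same skeleton: `openstack.connect` is patched at the class level, an operation context is built with `_prepare_context_for_operation`, the relevant openstacksdk proxy call is stubbed with `mock.MagicMock`, the plugin task is invoked, and the runtime properties it stores are asserted. The standalone sketch below (plain `mock` only; the value `'fake-port'` is made up and nothing in it is taken from the plugin) shows why assignments such as `mock_connection().network.create_port = ...` configure the very connection object the code under test receives: calling a `MagicMock` returns the same `return_value` child on every call.

```python
# Standalone sketch: a MagicMock hands back the same child mock on every
# call, so whatever the test wires onto mock_connection() is exactly what
# the patched openstack.connect() gives to the code under test.
import mock

mock_connection = mock.MagicMock()

# Every call returns the same return_value mock.
assert mock_connection() is mock_connection()

# Configure a fake SDK call on that shared child ('fake-port' is illustrative).
mock_connection().network.create_port = mock.MagicMock(return_value='fake-port')
assert mock_connection().network.create_port() == 'fake-port'
```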
+ +# Third party imports +import mock +import openstack.network.v2.router +import openstack.network.v2.network + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import router +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + ROUTER_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + NETWORK_NODE_TYPE) + + +@mock.patch('openstack.connect') +class RouterTestCase(OpenStackTestBase): + + def setUp(self): + super(RouterTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_router', + 'description': 'router_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'ext-network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'ext-network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-ext-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + ] + + router_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_relationships=router_rels) + + router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + network_instance = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'name': 'test_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'is_router_external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + + # Mock create router response + mock_connection().network.create_router = \ + mock.MagicMock(return_value=router_instance) + + # Mock get network response + mock_connection().network.get_network = \ + mock.MagicMock(return_value=network_instance) + + # Call create router + router.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_router') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + ROUTER_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + 
self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + # Mock delete router response + mock_connection().network.delete_router = \ + mock.MagicMock(return_value=None) + + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=router_instance) + + # Call delete router + router.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + new_config = { + 'name': 'test_updated_router', + } + + new_router_instance = \ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=old_router_instance) + + # Mock update router response + mock_connection().network.update_router = \ + mock.MagicMock(return_value=new_router_instance) + + # Call update router + router.update(args=new_config) + + def test_add_routes(self, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start', + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + old_router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 
'timestamp2', + }) + + new_config = { + 'routes': [ + { + 'destination': '10.10.4.0/24', + 'nexthop': '192.168.123.123' + } + ] + } + + new_router_instance = \ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [ + { + 'destination': '10.10.4.0/24', + 'nexthop': '192.168.123.123' + } + ], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=old_router_instance) + + # Mock update router response + mock_connection().network.update_router = \ + mock.MagicMock(return_value=new_router_instance) + + # Call start router + router.start(**new_config) + + self.assertEqual(self._ctx.instance.runtime_properties['routes'], + new_config['routes']) + + def test_remove_routes(self, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.stop', + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'routes': [ + { + 'destination': '10.10.4.0/24', + 'nexthop': '192.168.123.123' + } + ] + }) + + old_router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [ + { + 'destination': '10.10.4.0/24', + 'nexthop': '192.168.123.123' + } + ], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + new_router_instance = \ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=old_router_instance) + + # Mock update router response + mock_connection().network.update_router = \ + mock.MagicMock(return_value=new_router_instance) + + # Call stop router + router.stop() + + def test_add_interface_to_router(self, mock_connection): + # Prepare the context for postconfigure operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.relationship_lifecycle.' 
+ 'postconfigure', + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=router_instance) + + # Mock add interface router response + mock_connection().network.add_interface_to_router = \ + mock.MagicMock(return_value=router_instance) + + # Call add interface to router + router.add_interface_to_router( + **{'port_id': 'a95b5509-c122-4c2f-823e-884bb559afe3'}) + + def test_remove_interface_from_router(self, mock_connection): + # Prepare the context for unlink operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.relationship_lifecycle.' + 'unlink', + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + router_instance = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': [], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }) + # Mock get router response + mock_connection().network.get_router = \ + mock.MagicMock(return_value=router_instance) + + # Mock remove router interface response + mock_connection().network.remove_interface_from_router = \ + mock.MagicMock(return_value=router_instance) + + # Call remove interface from router + router.remove_interface_from_router( + **{'port_id': 'a95b5509-c122-4c2f-823e-884bb559afe3'}) + + def test_list_routers(self, mock_connection): + # Prepare the context for list routers operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + routers = [ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router_1', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }), + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afr8', + 'name': 'test_router_2', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }), + ] + + # Mock 
list routers response + mock_connection().network.routers = \ + mock.MagicMock(return_value=routers) + + # Call list routers + router.list_routers() + + # Check if the routers list saved as runtime properties + self.assertIn( + 'router_list', + self._ctx.instance.runtime_properties) + + # Check the size of routers list + self.assertEqual( + len(self._ctx.instance.runtime_properties['router_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='RouterTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + routers = [ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_router_1', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }), + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afr8', + 'name': 'test_router_2', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': { + 'network_id': 'a95b5509-c122-4c2f-823e-884bb559afe4' + }, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + }), + ] + + # Mock list router response + mock_connection().network.routers = \ + mock.MagicMock(return_value=routers) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + router.creation_validation() diff --git a/openstack_plugin/tests/network/test_security_group.py b/openstack_plugin/tests/network/test_security_group.py new file mode 100644 index 00000000..81215b1c --- /dev/null +++ b/openstack_plugin/tests/network/test_security_group.py @@ -0,0 +1,418 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
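Note: the security group `configure` tests below stub `create_security_group_rule` with `mock.MagicMock(side_effect=security_group_rules)`. When `side_effect` is an iterable, each successive call returns its next element, so every rule dict passed to `security_group.configure()` gets a distinct fake `SecurityGroupRule` back. A minimal standalone illustration (plain `mock`; the rule values are made up):

```python
# Standalone sketch: an iterable side_effect yields one element per call.
import mock

create_rule = mock.MagicMock(side_effect=['fake-rule-1', 'fake-rule-2'])

assert create_rule(direction='ingress') == 'fake-rule-1'
assert create_rule(direction='egress') == 'fake-rule-2'
# A third call would raise StopIteration because the iterable is exhausted.
```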
+ +# Third party imports +import mock +import openstack.network.v2.security_group +import openstack.network.v2.security_group_rule + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import security_group +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + SECURITY_GROUP_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class SecurityGroupTestCase(OpenStackTestBase): + + def setUp(self): + super(SecurityGroupTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_security_group', + 'description': 'security_group_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + security_group_instance = \ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_security_group', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }) + # Mock create security group response + mock_connection().network.create_security_group = \ + mock.MagicMock(return_value=security_group_instance) + + # Call create security group + security_group.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_security_group') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + SECURITY_GROUP_OPENSTACK_TYPE) + + def test_configure(self, mock_connection): + # Prepare the context for configure operation + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure', + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + security_group_rules = [ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'created_at': '0', + 'description': '1', + 'direction': 'egress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + # Mock get security group response + mock_connection().network.create_security_group_rule = \ + mock.MagicMock(side_effect=security_group_rules) + + # Call configure in order to add security group rules + security_group.configure(security_group_rules=[ + { + 'remote_ip_prefix': '0.0.0.0/0', + 'port_range_max': '80', + 'port_range_min': '80', + 'direction': 'ingress', + 'protocol': 'tcp' + }, + { + 'remote_ip_prefix': 
'0.0.0.0/0', + 'port_range_max': '80', + 'port_range_min': '80', + 'direction': 'egress', + 'protocol': 'tcp' + } + ]) + + def test_disable_default_egress_rules(self, mock_connection): + # Prepare the context for configure operation + properties = dict() + properties['disable_default_egress_rules'] = True + properties.update(self.node_properties) + + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.configure', + test_properties=properties, + test_runtime_properties={ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8' + }) + + default_security_group_rules = [ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': 'IPv4', + 'port_range_max': '-1', + 'port_range_min': '-1', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe4', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': 'IPv6', + 'port_range_max': '-1', + 'port_range_min': '-1', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '::/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + + security_group_rules = [ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'created_at': '0', + 'description': '1', + 'direction': 'egress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + # Mock create security group rule response + mock_connection().network.create_security_group_rule = \ + mock.MagicMock(side_effect=security_group_rules) + + # Mock delete security group rule response + mock_connection().network.delete_security_group_rule = \ + mock.MagicMock(return_value=None) + + # Mock list security group rules response + mock_connection().network.security_group_rules = \ + mock.MagicMock(return_value=default_security_group_rules) + + # Call configure in order to add security group rules + security_group.configure(security_group_rules=[ + { + 'remote_ip_prefix': '0.0.0.0/0', + 'port_range_max': '80', + 'port_range_min': '80', + 'direction': 'ingress', + 'protocol': 'tcp' + }, + { + 'remote_ip_prefix': '0.0.0.0/0', + 'port_range_max': '80', + 'port_range_min': '80', + 'direction': 'egress', + 'protocol': 'tcp' + } + ]) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation(
test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + security_group_instance = \ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_security_group', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }) + # Mock delete security group response + mock_connection().network.delete_security_group = \ + mock.MagicMock(return_value=None) + + # Mock get security group response + mock_connection().network.get_security_group = \ + mock.MagicMock(return_value=security_group_instance) + + # Call delete security group + security_group.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_security_group_instance = \ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_security_group', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }) + + new_config = { + 'name': 'test_updated_security_group', + } + + new_security_group_instance = \ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_security_group', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }) + + # Mock get security group response + mock_connection().network.get_security_group = \ + mock.MagicMock(return_value=old_security_group_instance) + + # Mock update security group response + mock_connection().network.update_security_group = \ + mock.MagicMock(return_value=new_security_group_instance) + + # Call update security group + security_group.update(args=new_config) + + def test_list_security_groups(self, mock_connection): + # Prepare the context for list security groups operation + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + security_groups = [ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_security_group_1', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }), + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 'test_security_group_2', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }), + ] + + # Mock list security groups response + mock_connection().network.security_groups = \ + mock.MagicMock(return_value=security_groups) + + # Call list security groups + security_group.list_security_groups() + + # Check if the security groups list saved as runtime properties + self.assertIn( + 'security_group_list', + 
self._ctx.instance.runtime_properties) + + # Check the size of security groups list + self.assertEqual( + len(self._ctx.instance.runtime_properties['security_group_list']), + 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='SecurityGroupTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + security_groups = [ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_security_group_1', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }), + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 'test_security_group_2', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }), + ] + + # Mock list security groups response + mock_connection().network.security_groups = \ + mock.MagicMock(return_value=security_groups) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + security_group.creation_validation() diff --git a/openstack_plugin/tests/network/test_security_group_rule.py b/openstack_plugin/tests/network/test_security_group_rule.py new file mode 100644 index 00000000..1e38ec82 --- /dev/null +++ b/openstack_plugin/tests/network/test_security_group_rule.py @@ -0,0 +1,232 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
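Note: the creation-validation tests in this diff stack a method-level `@mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets')` on top of the class-level `@mock.patch('openstack.connect')`. Patches are applied bottom-up, so the mock closest to the function is passed first and the class-level one last, which is why the signatures read `(self, mock_quota_sets, mock_connection)`. A standalone example of that ordering rule (it patches `os.getcwd` and `os.listdir` purely for illustration and is not part of the plugin):

```python
# Standalone sketch: decorator patches are applied bottom-up, so the
# method-level mock arrives before the class-level one.
import mock
import unittest


@mock.patch('os.getcwd')                  # class-level patch -> last argument
class PatchOrderingExample(unittest.TestCase):

    @mock.patch('os.listdir')             # method-level patch -> first argument
    def test_order(self, mock_listdir, mock_getcwd):
        mock_getcwd.return_value = '/fake/dir'
        mock_listdir.return_value = []
        self.assertEqual(mock_getcwd(), '/fake/dir')
        self.assertEqual(mock_listdir('/fake/dir'), [])
```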
+ +# Third party imports +import mock +import openstack.network.v2.security_group_rule + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import security_group_rule +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + SECURITY_GROUP_RULE_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class SecurityGroupRuleTestCase(OpenStackTestBase): + + def setUp(self): + super(SecurityGroupRuleTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'description': 'security_group_rule_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='SecurityGroupRuleTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + security_group_rule_instance = \ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'tenant_id': '11', + 'updated_at': '12' + }) + # Mock create security group rule response + mock_connection().network.create_security_group_rule = \ + mock.MagicMock(return_value=security_group_rule_instance) + + # Call create security group rule + security_group_rule.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + None) + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + SECURITY_GROUP_RULE_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='SecurityGroupRuleTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + security_group_rule_instance = \ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'tenant_id': '11', + 'updated_at': '12' + }) + # Mock delete security group rule response + mock_connection().network.delete_security_group_rule = \ + mock.MagicMock(return_value=None) + + # Mock get security group rule response + mock_connection().network.get_security_group = \ + mock.MagicMock(return_value=security_group_rule_instance) + + # Call delete security group rule + security_group_rule.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_list_security_group_rules(self, mock_connection): + # Prepare the context for list security group rules operation + self._prepare_context_for_operation( + test_name='SecurityGroupRuleTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + security_group_rules = [ + 
openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'created_at': '0', + 'description': '1', + 'direction': 'egress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + + # Mock list security group rules response + mock_connection().network.security_group_rules = \ + mock.MagicMock(return_value=security_group_rules) + + # Call list security group rules + security_group_rule.list_security_group_rules() + + # Check if the security group rules list saved as runtime properties + self.assertIn( + 'security_group_rule_list', + self._ctx.instance.runtime_properties) + + # Check the size of security groups list + self.assertEqual( + len(self._ctx.instance.runtime_properties[ + 'security_group_rule_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='SecurityGroupRuleTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + security_group_rules = [ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'created_at': '0', + 'description': '1', + 'direction': 'ingress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe2', + 'created_at': '0', + 'description': '1', + 'direction': 'egress', + 'ethertype': '3', + 'port_range_max': '80', + 'port_range_min': '80', + 'protocol': 'tcp', + 'remote_group_id': '7', + 'remote_ip_prefix': '0.0.0.0/0', + 'revision_number': 9, + 'security_group_id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + + # Mock list security group rules response + mock_connection().network.security_group_rules = \ + mock.MagicMock(return_value=security_group_rules) + + # Call list security group rules + security_group_rule.list_security_group_rules() + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + security_group_rule.creation_validation() diff --git a/openstack_plugin/tests/network/test_subnet.py b/openstack_plugin/tests/network/test_subnet.py new file mode 100644 index 00000000..a5ccf223 --- /dev/null +++ b/openstack_plugin/tests/network/test_subnet.py @@ -0,0 +1,368 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. 
All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +import mock +import openstack.network.v2.subnet +import openstack.network.v2.network + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.network import subnet +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + SUBNET_OPENSTACK_TYPE, + NETWORK_OPENSTACK_TYPE, + NETWORK_NODE_TYPE) + + +@mock.patch('openstack.connect') +class SubnetTestCase(OpenStackTestBase): + + def setUp(self): + super(SubnetTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_subnet', + 'description': 'subnet_description', + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'network-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-network', + } + } + }, + 'instance': { + 'id': 'network-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: 'a95b5509-c122-4c2f-823e-884bb559afe4', + OPENSTACK_TYPE_PROPERTY: NETWORK_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-network' + } + }, + 'type': NETWORK_NODE_TYPE, + }, + ] + + subnet_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + self._prepare_context_for_operation( + test_name='SubnetTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_relationships=subnet_rels) + + subnet_instance = openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_subnet', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + # Mock create subnet response + mock_connection().network.create_subnet = \ + mock.MagicMock(return_value=subnet_instance) + + # Call create subnet + subnet.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + 'a95b5509-c122-4c2f-823e-884bb559afe8') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_subnet') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + SUBNET_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='SubnetTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + subnet_instance = openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_subnet', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 
'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + # Mock delete subnet response + mock_connection().network.delete_subnet = \ + mock.MagicMock(return_value=None) + + # Mock get subnet response + mock_connection().network.get_subnet = \ + mock.MagicMock(return_value=subnet_instance) + + # Call delete subnet + subnet.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_update(self, mock_connection): + # Prepare the context for update operation + self._prepare_context_for_operation( + test_name='SubnetTestCase', + ctx_operation_name='cloudify.interfaces.operations.update') + + old_subnet_instance = openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_subnet', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + new_config = { + 'name': 'test_updated_subnet', + } + + new_subnets_instance = \ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_subnet', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + # Mock get subnet response + mock_connection().network.get_subnet = \ + mock.MagicMock(return_value=old_subnet_instance) + + # Mock update subnet response + mock_connection().network.update_subnet = \ + mock.MagicMock(return_value=new_subnets_instance) + + # Call update subnet + subnet.update(args=new_config) + + def test_list_subnets(self, mock_connection): + # Prepare the context for list subnets operation + self._prepare_context_for_operation( + test_name='SubnetTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + subnets = [ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_subnet_2', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }), + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 
'test_subnet_2', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }), + ] + + # Mock list subnets response + mock_connection().network.subnets = \ + mock.MagicMock(return_value=subnets) + + # Call list subnets + subnet.list_subnets() + + # Check if the subnets list saved as runtime properties + self.assertIn( + 'subnet_list', + self._ctx.instance.runtime_properties) + + # Check the size of subnets list + self.assertEqual( + len(self._ctx.instance.runtime_properties['subnet_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='SubnetTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + subnets = [ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_subnet_2', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }), + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe3', + 'name': 'test_subnet_2', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }), + ] + + # Mock list subnets response + mock_connection().network.subnets = \ + mock.MagicMock(return_value=subnets) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + subnet.creation_validation() diff --git a/openstack_plugin/tests/volume/__init__.py b/openstack_plugin/tests/volume/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_plugin/tests/volume/test_volume.py b/openstack_plugin/tests/volume/test_volume.py new file mode 100644 index 00000000..06322648 --- /dev/null +++ b/openstack_plugin/tests/volume/test_volume.py @@ -0,0 +1,668 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Third party imports +import mock +import openstack.block_storage.v2.volume +import openstack.block_storage.v2.backup +import openstack.block_storage.v2.snapshot +import openstack.exceptions +from cloudify.exceptions import (OperationRetry, + NonRecoverableError) + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.volume import volume +from openstack_plugin.utils import get_snapshot_name +from openstack_plugin.constants import (RESOURCE_ID, + IMAGE_NODE_TYPE, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + IMAGE_OPENSTACK_TYPE, + VOLUME_OPENSTACK_TYPE, + VOLUME_STATUS_CREATING, + VOLUME_STATUS_AVAILABLE, + VOLUME_STATUS_DELETING, + VOLUME_BOOTABLE, + VOLUME_SNAPSHOT_TASK, + VOLUME_BACKUP_ID) + + +@mock.patch('openstack.connect') +class VolumeTestCase(OpenStackTestBase): + + def setUp(self): + super(VolumeTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_volume', + 'description': 'volume_description', + 'size': '12' + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + rel_specs = [ + { + 'node': { + 'id': 'image-1', + 'properties': { + 'client_config': self.client_config, + 'resource_config': { + 'name': 'test-image', + } + } + }, + 'instance': { + 'id': 'image-1-efrgsd', + 'runtime_properties': { + RESOURCE_ID: '1', + OPENSTACK_TYPE_PROPERTY: IMAGE_OPENSTACK_TYPE, + OPENSTACK_NAME_PROPERTY: 'test-image' + } + }, + 'type': IMAGE_NODE_TYPE, + } + ] + volume_rels = self.get_mock_relationship_ctx_for_node(rel_specs) + + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create', + test_relationships=volume_rels) + + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume', + 'description': 'volume_description', + 'status': VOLUME_STATUS_CREATING + + }) + # Mock create volume response + mock_connection().block_storage.create_volume = \ + mock.MagicMock(return_value=volume_instance) + + # Call create volume + volume.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + '1') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_volume') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + VOLUME_OPENSTACK_TYPE) + + @mock.patch( + 'openstack_plugin.resources.volume.volume.wait_until_status') + def test_start(self, mock_wait_status, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.start') + + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume', + 'description': 'volume_description', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + + }) + # Mock get volume response + mock_connection().block_storage.get_volume = \ + mock.MagicMock(return_value=volume_instance) + + 
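+        # wait_until_status is patched for this test (see the decorator
+        # above), so start() is handed the already-AVAILABLE volume directly
+        # instead of polling the mocked connection and retrying.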
mock_wait_status.return_value = volume_instance + + # Call start volume + volume.start() + + self.assertEqual( + self._ctx.instance.runtime_properties[VOLUME_BOOTABLE], False) + + self.assertEqual( + self._ctx.instance.runtime_properties['availability_zone'], + 'test_availability_zone') + + @mock.patch( + 'openstack_plugin.resources.volume.volume._delete_volume_snapshot') + def test_delete(self, mock_delete_volume_snapshot, mock_connection): + # Prepare the context for start operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume', + 'description': 'volume_description', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + + }) + # Mock get volume response + mock_connection().block_storage.get_volume = \ + mock.MagicMock(side_effect=[volume_instance, + openstack.exceptions.ResourceNotFound]) + + # Mock delete volume response + mock_connection().block_storage.delete_volume = \ + mock.MagicMock(return_value=None) + + # Call delete volume + volume.delete() + + mock_delete_volume_snapshot.assert_called() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + @mock.patch( + 'openstack_plugin.resources.volume.volume._delete_volume_snapshot') + def test_delete_retry(self, mock_delete_volume_snapshot, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume', + 'description': 'volume_description', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + + }) + + volume_instance_deleting = openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume', + 'description': 'volume_description', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_DELETING + + }) + # Mock get volume response + mock_connection().block_storage.get_volume = \ + mock.MagicMock(side_effect=[volume_instance, + volume_instance_deleting]) + + # Mock delete volume response + mock_connection().block_storage.delete_volume = \ + mock.MagicMock(return_value=None) + + with self.assertRaises(OperationRetry): + # Call delete volume + volume.delete() + mock_delete_volume_snapshot.assert_called() + + def test_create_volume_backup(self, mock_connection): + # Prepare the context for create snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_backup', False) + + volume_backup_instance = openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_CREATING + + }) + available_volume_backup = \ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 
'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }) + # Mock create volume backup response + mock_connection().block_storage.create_backup = \ + mock.MagicMock(return_value=volume_backup_instance) + + # Mock get volume backup response + mock_connection().block_storage.get_backup = \ + mock.MagicMock(return_value=available_volume_backup) + + snapshot_params = { + 'snapshot_name': 'test_volume_backup', + 'snapshot_incremental': False + } + + # Call create backup volume volume + volume.snapshot_create(**snapshot_params) + + for attr in [VOLUME_SNAPSHOT_TASK, VOLUME_BACKUP_ID]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_create_volume_backup_with_retry(self, mock_connection): + # Prepare the context for create snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_backup', False) + + volume_backup_instance = openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_CREATING + + }) + # Mock create volume backup response + mock_connection().block_storage.create_backup = \ + mock.MagicMock(return_value=volume_backup_instance) + + # Mock get volume backup response + mock_connection().block_storage.get_backup = \ + mock.MagicMock(return_value=volume_backup_instance) + + snapshot_params = { + 'snapshot_name': 'test_volume_backup', + 'snapshot_incremental': False + } + + # Call create backup volume volume + with self.assertRaises(OperationRetry): + volume.snapshot_create(**snapshot_params) + + def test_create_volume_snapshot(self, mock_connection): + # Prepare the context for create snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_snapshot', True) + + volume_snapshot_instance = \ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_snapshot_description', + 'status': VOLUME_STATUS_CREATING + }) + available_volume_snapshot = \ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_snapshot_description', + 'status': VOLUME_STATUS_AVAILABLE + }) + # Mock create volume snapshot response + mock_connection().block_storage.create_snapshot = \ + mock.MagicMock(return_value=volume_snapshot_instance) + + # Mock get volume snapshot response + mock_connection().block_storage.get_snapshot = \ + mock.MagicMock(return_value=available_volume_snapshot) + + snapshot_params = { + 'snapshot_name': 'test_volume_snapshot', + 'snapshot_type': 'Daily', + 'snapshot_incremental': True + } + + # Call create snapshot volume volume + volume.snapshot_create(**snapshot_params) + + for attr in [VOLUME_SNAPSHOT_TASK, VOLUME_BACKUP_ID]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) + + def test_create_volume_snapshot_with_retry(self, mock_connection): + # Prepare the context for create snapshot operation + self._prepare_context_for_operation( + 
test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.create') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_snapshot', True) + + volume_snapshot_instance = \ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_snapshot_description', + 'status': VOLUME_STATUS_CREATING + }) + # Mock create volume snapshot response + mock_connection().block_storage.create_snapshot = \ + mock.MagicMock(return_value=volume_snapshot_instance) + + # Mock get volume snapshot response + mock_connection().block_storage.get_snapshot = \ + mock.MagicMock(return_value=volume_snapshot_instance) + + snapshot_params = { + 'snapshot_name': 'test_volume_snapshot', + 'snapshot_type': 'Daily', + 'snapshot_incremental': True + } + + # Call create snapshot volume volume + with self.assertRaises(OperationRetry): + volume.snapshot_create(**snapshot_params) + + def test_restore_volume_backup(self, mock_connection): + # Prepare the context for apply snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.apply') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_backup', False) + + restored_volume_backup_instance = \ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_CREATING + }) + + volume_backups = [ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }), + openstack.block_storage.v2.backup.Backup(**{ + 'id': '2', + 'name': 'test_volume_backup_2', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }) + ] + # Mock list volume backup response + mock_connection().block_storage.backups = \ + mock.MagicMock(return_value=volume_backups) + + # Mock restore volume backup response + mock_connection().block_storage.restore_backup = \ + mock.MagicMock(return_value=restored_volume_backup_instance) + + snapshot_params = { + 'snapshot_name': 'test_volume_backup', + 'snapshot_incremental': False + } + + # Call restore backup volume volume + volume.snapshot_apply(**snapshot_params) + + def test_restore_volume_snapshot(self, _): + # Prepare the context for apply snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.apply') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_params = { + 'snapshot_name': 'test_volume_snapshot', + 'snapshot_incremental': True + } + + # Call restore snapshot volume volume + with self.assertRaises(NonRecoverableError): + volume.snapshot_apply(**snapshot_params) + + def test_delete_volume_backup(self, mock_connection): + # Prepare the context for delete snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.delete') + + # Set resource id as runtime 
properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_backup', False) + + volume_backup_to_delete = openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_CREATING + + }) + + all_volume_backups = [ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }), + openstack.block_storage.v2.backup.Backup(**{ + 'id': '2', + 'name': 'test_volume_backup_2', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }) + ] + + remaining_volume_backups = [ + openstack.block_storage.v2.backup.Backup(**{ + 'id': '2', + 'name': 'test_volume_backup_2', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': VOLUME_STATUS_AVAILABLE + }) + ] + # Mock list volume backup response + mock_connection().block_storage.backups = \ + mock.MagicMock(side_effect=[all_volume_backups, + remaining_volume_backups]) + + # Mock get volume backup response + mock_connection().block_storage.get_backup = \ + mock.MagicMock(return_value=volume_backup_to_delete) + + # Mock delete volume backup response + mock_connection().block_storage.delete_backup = \ + mock.MagicMock(return_value=None) + + snapshot_params = { + 'snapshot_name': 'test_volume_backup', + 'snapshot_incremental': False + } + + # Call delete backup volume + volume.snapshot_delete(**snapshot_params) + + def test_delete_volume_snapshot(self, mock_connection): + # Prepare the context for delete snapshot operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.snapshot.delete') + + # Set resource id as runtime properties for volume instance + self._ctx.instance.runtime_properties['id'] = '1' + + snapshot_name = \ + get_snapshot_name('volume', 'test_volume_snapshot', True) + + volume_snapshot_to_delete = \ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'status': VOLUME_STATUS_CREATING + }) + + all_volume_snapshots = [ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': snapshot_name, + 'description': 'volume_backup_description', + 'status': VOLUME_STATUS_CREATING + }), + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': 'test_volume_snapshot_2', + 'description': 'volume_backup_description', + 'status': VOLUME_STATUS_CREATING + }) + ] + + remaining_volume_snapshots = [ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': '1', + 'name': 'test_volume_snapshot_2', + 'description': 'volume_backup_description', + 'status': VOLUME_STATUS_CREATING + }) + ] + # Mock list volume snapshots response + mock_connection().block_storage.snapshots = \ + mock.MagicMock(side_effect=[all_volume_snapshots, + remaining_volume_snapshots]) + + # Mock get volume snapshot response + mock_connection().block_storage.get_snapshot = \ + mock.MagicMock(return_value=volume_snapshot_to_delete) + + # Mock delete volume snapshot response + mock_connection().block_storage.delete_snapshot = \ + mock.MagicMock(return_value=None) + + snapshot_params = { + 'snapshot_name': 
'test_volume_snapshot', + 'snapshot_incremental': True + } + + # Call delete snapshot volume + volume.snapshot_delete(**snapshot_params) + + def test_list_volumes(self, mock_connection): + # Prepare the context for list volumes operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.operations.list') + + volumes = [ + openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume_1', + 'description': 'volume_description_1', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + }), + openstack.block_storage.v2.volume.Volume(**{ + 'id': '2', + 'name': 'test_volume_2', + 'description': 'volume_description_2', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + }), + ] + + # Mock list volumes response + mock_connection().block_storage.volumes = \ + mock.MagicMock(return_value=volumes) + + # Call list volumes + volume.list_volumes() + + # Check if the projects list saved as runtime properties + self.assertIn( + 'volume_list', + self._ctx.instance.runtime_properties) + + # Check the size of volume list + self.assertEqual( + len(self._ctx.instance.runtime_properties['volume_list']), 2) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_creation_validation(self, mock_quota_sets, mock_connection): + # Prepare the context for creation validation operation + self._prepare_context_for_operation( + test_name='VolumeTestCase', + ctx_operation_name='cloudify.interfaces.validation.creation') + + volumes = [ + openstack.block_storage.v2.volume.Volume(**{ + 'id': '1', + 'name': 'test_volume_1', + 'description': 'volume_description_1', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + }), + openstack.block_storage.v2.volume.Volume(**{ + 'id': '2', + 'name': 'test_volume_2', + 'description': 'volume_description_2', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': VOLUME_STATUS_AVAILABLE + }), + ] + + # Mock list volumes response + mock_connection().block_storage.volumes = \ + mock.MagicMock(return_value=volumes) + + # Mock the quota size response + mock_quota_sets.return_value = 20 + + # Call creation validation + volume.creation_validation() diff --git a/openstack_plugin/tests/volume/test_volume_type.py b/openstack_plugin/tests/volume/test_volume_type.py new file mode 100644 index 00000000..7592b5c8 --- /dev/null +++ b/openstack_plugin/tests/volume/test_volume_type.py @@ -0,0 +1,102 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
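The volume snapshot and backup tests above build their expected names with `get_snapshot_name`, which (see `openstack_plugin/utils.py` later in this diff) formats them as `<object_type>-<resource_id>-<snapshot_name>-<increment|backup>`. A small standalone sketch of that convention; `build_snapshot_name` is a hypothetical stand-in that takes the resource id explicitly instead of reading it from the Cloudify context:

```python
# Stand-in for get_snapshot_name() in openstack_plugin/utils.py, with the
# resource id passed in explicitly rather than read from ctx.
def build_snapshot_name(object_type, resource_id, snapshot_name,
                        snapshot_incremental):
    return "{0}-{1}-{2}-{3}".format(
        object_type, resource_id, snapshot_name,
        "increment" if snapshot_incremental else "backup")


# The tests set runtime_properties['id'] = '1', so a full backup named
# 'test_volume_backup' and an incremental snapshot named
# 'test_volume_snapshot' are expected to be stored as:
assert build_snapshot_name(
    'volume', '1', 'test_volume_backup', False) == \
    'volume-1-test_volume_backup-backup'
assert build_snapshot_name(
    'volume', '1', 'test_volume_snapshot', True) == \
    'volume-1-test_volume_snapshot-increment'
```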
+ +# Third party imports +import mock +import openstack.block_storage.v2.type + +# Local imports +from openstack_plugin.tests.base import OpenStackTestBase +from openstack_plugin.resources.volume import volume_type +from openstack_plugin.constants import (RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY, + VOLUME_TYPE_OPENSTACK_TYPE) + + +@mock.patch('openstack.connect') +class VolumeTypeTestCase(OpenStackTestBase): + + def setUp(self): + super(VolumeTypeTestCase, self).setUp() + + @property + def resource_config(self): + return { + 'name': 'test_volume_type', + 'extra_specs': { + 'capabilities': 'gpu', + } + } + + def test_create(self, mock_connection): + # Prepare the context for create operation + self._prepare_context_for_operation( + test_name='VolumeTypeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.create') + + volume_type_instance = openstack.block_storage.v2.type.Type(**{ + 'id': '1', + 'name': 'test_volume_type', + 'extra_specs': { + 'capabilities': 'gpu', + } + }) + # Mock create volume type response + mock_connection().block_storage.create_type = \ + mock.MagicMock(return_value=volume_type_instance) + + # Call create volume type + volume_type.create() + + self.assertEqual(self._ctx.instance.runtime_properties[RESOURCE_ID], + '1') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY], + 'test_volume_type') + + self.assertEqual( + self._ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY], + VOLUME_TYPE_OPENSTACK_TYPE) + + def test_delete(self, mock_connection): + # Prepare the context for delete operation + self._prepare_context_for_operation( + test_name='VolumeTypeTestCase', + ctx_operation_name='cloudify.interfaces.lifecycle.delete') + + volume_type_instance = openstack.block_storage.v2.type.Type(**{ + 'id': '1', + 'name': 'test_volume_type', + 'extra_specs': { + 'capabilities': 'gpu', + } + }) + # Mock get volume type response + mock_connection().block_storage.get_type = \ + mock.MagicMock(return_value=volume_type_instance) + + # Mock delete volume type response + mock_connection().block_storage.delete_type = \ + mock.MagicMock(return_value=None) + + # Call delete volume type + volume_type.delete() + + for attr in [RESOURCE_ID, + OPENSTACK_NAME_PROPERTY, + OPENSTACK_TYPE_PROPERTY]: + self.assertNotIn(attr, self._ctx.instance.runtime_properties) diff --git a/openstack_plugin/utils.py b/openstack_plugin/utils.py new file mode 100644 index 00000000..10f100bc --- /dev/null +++ b/openstack_plugin/utils.py @@ -0,0 +1,692 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
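The `test_delete` and `test_delete_retry` cases for volumes above mock `get_volume` with a two-element `side_effect`, which mirrors a get, delete, re-check flow: if the re-check raises `ResourceNotFound` the volume is gone, otherwise the operation raises `OperationRetry` so Cloudify re-runs it. A simplified sketch of that flow under those assumptions; `delete_volume_sketch` is illustrative only and is not the plugin's actual `volume.delete`:

```python
# Simplified delete-then-recheck flow matching the mocked call sequence.
import openstack.exceptions
from cloudify.exceptions import OperationRetry


def delete_volume_sketch(conn, volume_id):
    # First lookup finds the volume, then the delete request is issued.
    volume = conn.block_storage.get_volume(volume_id)
    conn.block_storage.delete_volume(volume)
    try:
        refreshed = conn.block_storage.get_volume(volume_id)
    except openstack.exceptions.ResourceNotFound:
        # Deletion finished; the real code also clears runtime properties.
        return
    # Still present (e.g. status 'deleting'): ask Cloudify to retry.
    raise OperationRetry(
        'Volume {0} is still in state {1}, retrying'.format(
            volume_id, refreshed.status))
```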
+ +# Standard imports +import sys +import base64 +import inspect + + +# Third part imports +import openstack.exceptions +from cloudify import compute +from cloudify import ctx +from cloudify.exceptions import (NonRecoverableError, OperationRetry) +from cloudify.utils import exception_to_error_cause + +try: + from cloudify.constants import NODE_INSTANCE, RELATIONSHIP_INSTANCE +except ImportError: + NODE_INSTANCE = 'node-instance' + RELATIONSHIP_INSTANCE = 'relationship-instance' + +# Local imports +from openstack_plugin.constants import (PS_OPEN, + PS_CLOSE, + QUOTA_VALID_MSG, + QUOTA_INVALID_MSG, + INFINITE_RESOURCE_QUOTA, + RESOURCE_ID, + OPENSTACK_TYPE_PROPERTY, + OPENSTACK_NAME_PROPERTY, + CLOUDIFY_NEW_NODE_OPERATIONS, + CLOUDIFY_CREATE_OPERATION, + CLOUDIFY_DELETE_OPERATION) + + +def find_relationships_by_node_type_hierarchy(ctx_node_instance, node_type): + """ + Finds all specified relationships of the Cloudify + instance where the related node type is of a specified type. + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :param node_type: Cloudify node type to search node_ctx.relationships for + :return: List of Cloudify relationships + """ + return [target_rel for target_rel in ctx_node_instance.relationships + if node_type in target_rel.target.node.type_hierarchy] + + +def find_relationships_by_openstack_type(_ctx, type_name): + """ + This method will lookup relationships for cloudify node based on the + type of the nodes which are connected to that node + :param _ctx: Cloudify context instance cloudify.context.CloudifyContext + :param str type_name: Node type which is connected to the current node + :return: list of RelationshipSubjectContext + """ + return [rel for rel in _ctx.instance.relationships + if rel.target.instance.runtime_properties.get( + OPENSTACK_TYPE_PROPERTY) == type_name] + + +def find_relationship_by_node_type(ctx_node_instance, node_type): + """ + Finds a single relationship of the Cloudify + instance where the related node type is of a specified type. + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :param node_type: Cloudify node type to search node_ctx.relationships for + :return: A Cloudify relationship or None + """ + relationships = \ + find_relationships_by_node_type_hierarchy(ctx_node_instance, node_type) + return relationships[0] if len(relationships) > 0 else None + + +def find_openstack_ids_of_connected_nodes_by_openstack_type(_ctx, type_name): + """ + This method will return list of openstack ids for connected nodes + associated with the current node instance + :param _ctx: Cloudify context instance cloudify.context.CloudifyContext + :param str type_name: Node type which is connected to the current node + :return: List of openstack resource ids + """ + return [rel.target.instance.runtime_properties[RESOURCE_ID] + for rel in find_relationships_by_openstack_type(_ctx, type_name)] + + +def find_relationships_by_relationship_type(_ctx, type_name): + """ + Find cloudify relationships by relationship type. + Follows the inheritance tree. + :param _ctx: Cloudify context instance cloudify.context.CloudifyContext + :param type_name: desired relationship type derived + from cloudify.relationships.depends_on. 
+ :return: list of RelationshipSubjectContext + """ + + return [rel for rel in _ctx.instance.relationships if + type_name in rel.type_hierarchy] + + +def get_resource_id_from_runtime_properties(ctx_node_instance): + """ + This method will lookup the resource id which is stored as part of + runtime properties + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :return: Resource id + """ + return ctx_node_instance.instance.runtime_properties.get(RESOURCE_ID) + + +def resolve_node_ctx_from_relationship(_ctx): + """ + This method is to decide where to get node from relationship context + since this is not exposed correctly from cloudify + :param _ctx: current cloudify context object + :return: RelationshipSubjectContext instance + """ + # Get the node_id for the current node in order to decide if that node + # is source | target + node_id = _ctx._context.get('node_id') + + source_node_id = _ctx.source._context.get('node_id') + target_node_id = _ctx.target._context.get('node_id') + + if node_id == source_node_id: + return _ctx.source + elif node_id == target_node_id: + return _ctx.target + else: + raise NonRecoverableError( + 'Unable to decide if current node is source or target') + + +def resolve_ctx(_ctx): + """ + This method is to lookup right context instance which could be one of + the following: + 1- Context for source relationship instance + 2- Context for target relationship instance + 3- Context for current node + :param _ctx: current cloudify context object + :return: This could be RelationshipSubjectContext or CloudifyContext + instance + """ + if _ctx.type == RELATIONSHIP_INSTANCE: + return resolve_node_ctx_from_relationship(_ctx) + if _ctx.type != NODE_INSTANCE: + _ctx.logger.warn( + 'CloudifyContext is neither {0} nor {1} type. ' + 'Falling back to {0}. This indicates a problem.'.format( + NODE_INSTANCE, RELATIONSHIP_INSTANCE)) + return _ctx + + +def handle_userdata(existing_userdata): + """ + This method will be responsible for handle user data provided by the + user on the following cases: + 1. When user specify "user_data" to create server on openstack + 2. When "install_method" for agent is set to "Init-script" the plugin + should be able to inject/update "user_data" for server + :param existing_userdata: + :return: final_userdata + """ + # Check the agent init script so that it can be injected to the target + # machine to install the agent daemon + install_agent_userdata = ctx.agent.init_script() + # Get the "os_family" type, by default all node instances extend + # "cloudify.nodes.Compute" node will have "os_family" set to "Linux" + # It can be override for Windows which is need to be handled differently + os_family = ctx.node.properties['os_family'] + + if not (existing_userdata or install_agent_userdata): + return None + + if not existing_userdata: + existing_userdata = '' + + if install_agent_userdata and os_family == 'windows': + + # Get the powershell content from install_agent_userdata + install_agent_userdata = \ + extract_powershell_content(install_agent_userdata) + + # Get the powershell content from existing_userdata + # (If it exists.) + existing_userdata_powershell = \ + extract_powershell_content(existing_userdata) + + # Combine the powershell content from two sources. + install_agent_userdata = \ + '#ps1_sysnative\n{0}\n{1}\n{2}\n{3}\n'.format( + PS_OPEN, + existing_userdata_powershell, + install_agent_userdata, + PS_CLOSE) + + # Additional work on the existing_userdata. 
+ # Remove duplicate Powershell content. + # Get rid of unnecessary newlines. + existing_userdata = \ + existing_userdata.replace( + existing_userdata_powershell, + '').replace( + PS_OPEN, + '').replace( + PS_CLOSE, + '').strip() + + if not existing_userdata or existing_userdata.isspace(): + final_userdata = install_agent_userdata + elif not install_agent_userdata: + final_userdata =\ + compute.create_multi_mimetype_userdata([existing_userdata]) + else: + final_userdata = compute.create_multi_mimetype_userdata( + [existing_userdata, install_agent_userdata]) + + final_userdata = base64.b64encode(final_userdata) + return final_userdata + + +def extract_powershell_content(string_with_powershell): + """We want to filter user data for powershell scripts. + However, Openstack allows only one segment that is Powershell. + So we have to concat separate Powershell scripts into one. + First we separate all Powershell scripts without their tags. + Later we will add the tags back. + """ + + split_string = string_with_powershell.splitlines() + + if not split_string: + return '' + + if split_string[0] == '#ps1_sysnative' or \ + split_string[0] == '#ps1_x86': + split_string.pop(0) + + if PS_OPEN not in split_string: + script_start = -1 # Because we join at +1. + else: + script_start = split_string.index(PS_OPEN) + + if PS_CLOSE not in split_string: + script_end = len(split_string) + else: + script_end = split_string.index(PS_CLOSE) + + # Return everything between Powershell back as a string. + return '\n'.join(split_string[script_start + 1:script_end]) + + +def reset_dict_empty_keys(dict_object): + """ + Reset empty values for object and convert it to None object so that we + can us them when initiate API request + :param dict_object: dict of properties need to be reset + :return dict_object: Updated dict_object + """ + for key, value in dict_object.iteritems(): + if not value: + dict_object[key] = None + return dict_object + + +def update_runtime_properties(properties=None): + """ + Update runtime properties for node instance + :param properties: dict of properties need to be set for node instance + """ + properties = properties or {} + for key, value in properties.items(): + ctx.instance.runtime_properties[key] = value + + +def add_resource_list_to_runtime_properties(openstack_type_name, object_list): + """ + Update runtime properties for node instance with list of available + resources on openstack for certain openstack type + :param openstack_type_name: openstack resource name type + :param object_list: list of all available resources on openstack + """ + objects = [] + for obj in object_list: + if type(obj) not in [str, dict]: + obj = obj.to_dict() + objects.append(obj) + + key_list = '{0}_list'.format(openstack_type_name) + + # if the key already exists then we need to re-generate new data and + # omits the old one if the list command multiple times + if ctx.instance.runtime_properties.get(key_list): + del ctx.instance.runtime_properties[key_list] + + ctx.instance.runtime_properties[key_list] = objects + + +def validate_resource_quota(resource, openstack_type): + """ + Do a validation for openstack resource to make sure it is allowed to + create resource based on available resources created and maximum quota + :param resource: openstack resource instance + :param openstack_type: openstack resource type + """ + ctx.logger.info( + 'validating resource {0} (node {1})' + ''.format(openstack_type, ctx.node.id) + ) + openstack_type_plural = resource.resource_plural(openstack_type) + + resource_list = 
list(resource.list()) + + # This is the number of resources of this type that are already provisioned + resource_amount = len(resource_list) + + # Log a message to indicate to the caller that a call is about to be + # triggered to fetch the quota for the current resource + ctx.logger.info( + 'Fetching quota for resource {0} (node {1})' + ''.format(openstack_type, ctx.node.id) + ) + + # This represents the quota for the provided openstack resource type + resource_quota = resource.get_quota_sets(openstack_type_plural) + + if resource_amount < resource_quota \ + or resource_quota == INFINITE_RESOURCE_QUOTA: + ctx.logger.debug( + QUOTA_VALID_MSG.format( + openstack_type, + ctx.node.id, + openstack_type_plural, + resource_amount, + resource_quota) + ) + else: + err_message = \ + QUOTA_INVALID_MSG.format( + openstack_type, + ctx.node.id, + openstack_type_plural, + resource_amount, + resource_quota + ) + ctx.logger.error('VALIDATION ERROR: {0}'.format(err_message)) + raise NonRecoverableError(err_message) + + +def set_runtime_properties_from_resource(ctx_node_instance, + openstack_resource): + """ + Set openstack "type" & "name" as runtime properties for the current + cloudify node instance + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :param openstack_resource: Openstack resource instance + """ + if ctx_node_instance and openstack_resource: + ctx_node_instance.instance.runtime_properties[ + OPENSTACK_TYPE_PROPERTY] = openstack_resource.resource_type + + ctx_node_instance.instance.runtime_properties[ + OPENSTACK_NAME_PROPERTY] = openstack_resource.name + + +def unset_runtime_properties_from_instance(ctx_node_instance): + """ + Unset all runtime properties from the node instance when the delete + operation task is finished + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + """ + for key, _ in ctx_node_instance.instance.runtime_properties.items(): + del ctx_node_instance.instance.runtime_properties[key] + + +def prepare_resource_instance(class_decl, ctx_node_instance, kwargs): + """ + This method is used to prepare and instantiate an instance of an + openstack resource so that it can be used to make API requests and + execute the required operations + :param class_decl: Class name of the resource instance we need to create + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :param kwargs: Configuration data for the openstack resource that may be + provided via the task operation inputs + :return: Instance of openstack resource + """ + def get_property_by_name(property_name): + property_value = None + # TODO: Improve this to be more thorough.
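+        # Resolution order: a node property provides the base value, a
+        # runtime property of the same name overrides it (or is merged in
+        # when both are dicts), and a kwarg passed to the operation task
+        # overrides or extends both.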
+ if property_name in ctx_node_instance.node.properties: + property_value = \ + ctx_node_instance.node.properties.get(property_name) + + if property_name in ctx_node_instance.instance.runtime_properties: + if isinstance(property_value, dict): + property_value.update( + ctx_node_instance.instance.runtime_properties.get( + property_name)) + else: + property_value = \ + ctx_node_instance.instance.runtime_properties.get( + property_name) + + if property_name in kwargs: + kwargs_value = kwargs.pop(property_name) + if isinstance(property_value, dict): + property_value.update(kwargs_value) + else: + return kwargs_value + return property_value + + client_config = get_property_by_name('client_config') + resource_config = get_property_by_name('resource_config') + + # If this arg is exist, that means user + # provide extra/optional configuration for the defined node + if resource_config.get('kwargs'): + extra_config = resource_config.pop('kwargs') + resource_config.update(extra_config) + + # Check if resource_id is part of runtime properties so that we + # can add it to the resource_config + if RESOURCE_ID in ctx_node_instance.instance.runtime_properties: + resource_config['id'] = \ + ctx_node_instance.instance.runtime_properties[RESOURCE_ID] + + resource = class_decl(client_config=client_config, + resource_config=resource_config, + logger=ctx.logger) + + return resource + + +def update_runtime_properties_for_operation_task(operation_name, + ctx_node_instance, + openstack_resource): + """ + This method will update runtime properties for node instance based on + the operation task being running + :param str operation_name: + :param ctx_node_instance: Cloudify node instance which is an instance of + cloudify.context.NodeInstanceContext + :param openstack_resource: Openstack resource instance + """ + + # Set runtime properties for "name" & "type" when current + # operation is "create", so that they can be used later on + if operation_name == CLOUDIFY_CREATE_OPERATION: + set_runtime_properties_from_resource(ctx_node_instance, + openstack_resource) + # Clean all runtime properties for node instance when current operation + # is delete + elif operation_name == CLOUDIFY_DELETE_OPERATION: + unset_runtime_properties_from_instance(ctx_node_instance) + + +def handle_external_resource(ctx_node_instance, + openstack_resource, + existing_resource_handler=None, + **kwargs): + """ + :param ctx_node_instance: Cloudify context cloudify.context.CloudifyContext + :param openstack_resource: Openstack resource instance + :param existing_resource_handler: Callback handler that used to be + called in order to execute custom operation when "use_external_resource" is + enabled + :param kwargs: Any extra param passed to the existing_resource_handler + """ + + # Get the current operation name + operation_name = get_current_operation() + + # Validate if the "is_external" is set and the resource + # identifier (id|name) for the Openstack is invalid raise error and + # abort the operation + error_message = openstack_resource.validate_resource_identifier() + + # Raise error when validation failed + if error_message: + raise NonRecoverableError(error_message) + + # Cannot delete/create resource when it is external + if operation_name in [CLOUDIFY_CREATE_OPERATION, + CLOUDIFY_DELETE_OPERATION]: + ctx.logger.info( + 'Using external resource {0}'.format(RESOURCE_ID)) + + try: + # Get the remote resource + remote_resource = openstack_resource.get() + except openstack.exceptions.SDKException as error: + _, _, tb = sys.exc_info() + raise 
NonRecoverableError( + 'Failure while trying to request ' + 'Openstack API: {}'.format(error.message), + causes=[exception_to_error_cause(error, tb)]) + + # Check the operation type and based on that decide what to do + if operation_name == CLOUDIFY_CREATE_OPERATION: + ctx.logger.info( + 'not creating resource {0}' + ' since an external resource is being used' + ''.format(remote_resource.name)) + ctx_node_instance.instance.runtime_properties[RESOURCE_ID] \ + = remote_resource.id + + # Just log a message that we cannot delete the resource + elif operation_name == CLOUDIFY_DELETE_OPERATION: + ctx.logger.info( + 'not deleting resource {0}' + ' since an external resource is being used' + ''.format(remote_resource.name)) + + # Check if we need to run a custom operation for the already existing + # resource for this operation task + if existing_resource_handler: + # We may need to send the "openstack_resource" to the + # existing resource handler and in order to do that we may + # need to check if the resource is already there or not + func_args = inspect.getargspec(existing_resource_handler).args + if 'openstack_resource' in func_args: + kwargs['openstack_resource'] = openstack_resource + + existing_resource_handler(**kwargs) + + +def get_snapshot_name(object_type, snapshot_name, snapshot_incremental): + """ + Generate snapshot name + :param str object_type: Object type that the snapshot is generated for + (vm, disk, etc.) + :param str snapshot_name: Snapshot name + :param bool snapshot_incremental: Flag to create an incremental snapshot + or a full backup + :return: Snapshot name + """ + return "{0}-{1}-{2}-{3}".format( + object_type, get_resource_id_from_runtime_properties(ctx), + snapshot_name, "increment" if snapshot_incremental else "backup") + + +def get_current_operation(): + """ Get the current task operation from the current cloudify context + :return str: Operation name + """ + return ctx.operation.name + + +def get_ready_resource_status(resource, + resource_type, + status, + error_statuses): + """ + This method checks the current status of an openstack resource while + running a certain operation on it, in order to make sure that the + resource's operation is done + :param resource: Current instance of openstack resource + :param str resource_type: Resource type to check the status for + :param str status: desired status to check the resource against + :param list error_statuses: List of error statuses that we should raise an + error for if the remote openstack resource matches them + :return: Instance of the current openstack object containing the updated + status, and a boolean flag indicating whether it has reached the desired + status + """ + # Get the last updated instance in order to compare its remote status + # with the desired one that the resource should be in + openstack_resource = resource.get() + + # If the remote status of the current object matches one of the error + # statuses defined to this method, then a NonRecoverableError must + # be raised + if openstack_resource.status in error_statuses: + raise NonRecoverableError('{0} {1} is in error state' + ''.format(resource_type, + openstack_resource.id)) + + # Check if the openstack resource matches the desired status + if openstack_resource.status == status: + return openstack_resource, True + + # The object is not ready yet + return openstack_resource, False + + +def wait_until_status(resource, + resource_type, + status, + error_statuses): + """ + This method is built in order to check the status of the openstack + resource and whether it is ready to be used or 
+
+
+def merge_resource_config(resource_config, config):
+    """
+    This method merges the resource configuration with any user-provided
+    configuration
+    :param dict resource_config: Resource configuration required to create
+    the resource in openstack
+    :param dict config: User configuration that can be merged with, or
+    override, the resource configuration
+    """
+    if all(item and isinstance(item, dict)
+           for item in [resource_config, config]):
+        resource_config.update(**config)
+
+
+def generate_attachment_volume_key(prefix, volume_id, server_id):
+    """
+    This method helps to generate the attachment volume key, which can be
+    used as a runtime property when attaching/detaching a volume to/from a
+    server
+    :param str prefix: Any prefix that could be added to the key
+    :param str volume_id: Unique volume id
+    :param str server_id: Unique server id
+    :return str: attachment volume key
+    """
+    if all([prefix, volume_id, server_id]):
+        return '{0}-{1}-{2}'.format(prefix, volume_id, server_id)
+
+    _ctx = resolve_ctx(ctx)
+    return '{0}-attachment-volume'.format(_ctx.instance.id)
+
+
+def assign_resource_payload_as_runtime_properties(_ctx,
+                                                  payload,
+                                                  resource_type):
+    """
+    Store the resource configuration in the runtime
+    properties and clean any potentially sensitive data.
+    :param _ctx: Cloudify context cloudify.context.CloudifyContext
+    :param dict payload: The payload object for the resource
+    :param str resource_type: Resource openstack type
+    """
+    if all([getattr(ctx, 'instance'), payload, resource_type]):
+        if resource_type not in ctx.instance.runtime_properties.keys():
+            ctx.instance.runtime_properties[resource_type] = {}
+        for key, value in payload.items():
+            if key not in ['user_data', 'adminPass']:
+                ctx.instance.runtime_properties[resource_type][key] = value
+
+
+def allow_to_run_operation_for_external_node(operation_name):
+    """
+    This method checks whether the current operation is allowed for an
+    external node that has the flag "use_external_resource" set to "True"
+    :param (str) operation_name: The cloudify operation name for the node
+    :return bool: Flag to indicate whether or not it is allowed to run the
+    operation for the external node
+    """
+    if operation_name not in CLOUDIFY_NEW_NODE_OPERATIONS:
+        return True
+    return False
diff --git a/openstack_plugin_common/__init__.py b/openstack_plugin_common/__init__.py
deleted file mode 100644
index d802efba..00000000
--- a/openstack_plugin_common/__init__.py
+++ /dev/null
@@ -1,1235 +0,0 @@
-#########
-# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from functools import wraps, partial -import json -import os -import sys -import logging -import copy - -from IPy import IP -from keystoneauth1 import loading, session -import cinderclient.client as cinder_client -import cinderclient.exceptions as cinder_exceptions -import keystoneclient.v3.client as keystone_client -import keystoneclient.exceptions as keystone_exceptions -import neutronclient.v2_0.client as neutron_client -import neutronclient.common.exceptions as neutron_exceptions -import novaclient.client as nova_client -import novaclient.exceptions as nova_exceptions -import glanceclient.client as glance_client -import glanceclient.exc as glance_exceptions - -import cloudify -from cloudify import context, ctx -from cloudify.exceptions import NonRecoverableError, RecoverableError - -INFINITE_RESOURCE_QUOTA = -1 - -# properties -USE_EXTERNAL_RESOURCE_PROPERTY = 'use_external_resource' -CREATE_IF_MISSING_PROPERTY = 'create_if_missing' -CONFIG_PROPERTY = 'openstack_config' - -# runtime properties -OPENSTACK_AZ_PROPERTY = 'availability_zone' -OPENSTACK_ID_PROPERTY = 'external_id' # resource's openstack id -OPENSTACK_TYPE_PROPERTY = 'external_type' # resource's openstack type -OPENSTACK_NAME_PROPERTY = 'external_name' # resource's openstack name -OPENSTACK_RESOURCE_PROPERTY = 'external_resource' # resource's parameters -CONDITIONALLY_CREATED = 'conditionally_created' # resource was -# conditionally created -CONFIG_RUNTIME_PROPERTY = CONFIG_PROPERTY # openstack configuration - -# operation inputs -CONFIG_INPUT = CONFIG_PROPERTY - -# runtime properties which all types use -COMMON_RUNTIME_PROPERTIES_KEYS = [OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_NAME_PROPERTY, - OPENSTACK_RESOURCE_PROPERTY, - CONDITIONALLY_CREATED] - -MISSING_RESOURCE_MESSAGE = "Couldn't find a resource of " \ - "type {0} with the name or id {1}" - -AUTH_PARAM_INSECURE = 'insecure' -AUTH_PARM_CA_CERT = 'ca_cert' - -KEY_USE_CFY_LOGGER = 'use_cfy_logger' -KEY_GROUPS = 'groups' -KEY_LOGGERS = 'loggers' - -DEFAULT_LOGGING_CONFIG = { - KEY_USE_CFY_LOGGER: True, - KEY_GROUPS: { - 'nova': logging.DEBUG, - 'neutron': logging.DEBUG, - 'cinder': logging.DEBUG, - 'keystone': logging.DEBUG, - 'glance': logging.DEBUG - }, - KEY_LOGGERS: { - 'keystoneauth.session': logging.DEBUG - } -} - -LOGGING_GROUPS = { - 'nova': ['novaclient.client', 'novaclient.v2.client'], - 'neutron': ['neutronclient.client', 'neutronclient.v2_0.client'], - 'cinder': ['cinderclient.client', 'cinderclient.v1.client', - 'cinderclient.v2.client', 'cinderclient.v3.client'], - 'keystone': ['keystoneclient.client', 'keystoneclient.v2_0.client', - 'keystoneclient.v3.client'], - 'glance': ['glanceclient.client', 'glanceclient.v1.client', - 'glanceclient.v2.client'] -} - - -# TODO: Move this to cloudify-plugins-common (code freeze currently -# in effect). -class CloudifyCtxLogHandler(logging.Handler): - """ - A logging handler for Cloudify. - A logger attached to this handler will result in logging being passed - through to the Cloudify logger. - """ - def __init__(self, ctx): - """ - Constructor. 
- :param ctx: current Cloudify context - """ - logging.Handler.__init__(self) - self.ctx = ctx - - def emit(self, record): - """ - Callback to emit a log record. - :param record: log record to write - :type record: logging.LogRecord - """ - message = self.format(record) - self.ctx.logger.log(record.levelno, message) - - -class ProviderContext(object): - - def __init__(self, provider_context): - self._provider_context = provider_context or {} - self._resources = self._provider_context.get('resources', {}) - - @property - def agents_keypair(self): - return self._resources.get('agents_keypair') - - @property - def agents_security_group(self): - return self._resources.get('agents_security_group') - - @property - def ext_network(self): - return self._resources.get('ext_network') - - @property - def floating_ip(self): - return self._resources.get('floating_ip') - - @property - def int_network(self): - return self._resources.get('int_network') - - @property - def management_keypair(self): - return self._resources.get('management_keypair') - - @property - def management_security_group(self): - return self._resources.get('management_security_group') - - @property - def management_server(self): - return self._resources.get('management_server') - - @property - def router(self): - return self._resources.get('router') - - @property - def subnet(self): - return self._resources.get('subnet') - - def __repr__(self): - info = json.dumps(self._provider_context) - return '<' + self.__class__.__name__ + ' ' + info + '>' - - -def provider(ctx): - return ProviderContext(ctx.provider_context) - - -def assign_payload_as_runtime_properties(ctx, resource_name, payload={}): - """ - In general Openstack API objects have create, update, and delete - functions. Each function normally receives a payload that describes - the desired configuration of the object. - This makes sure to store that configuration in the runtime - properties and cleans any potentially sensitive data. - - :param ctx: The Cloudify NodeInstanceContext - :param resource_name: A string describing the resource. - :param payload: The payload. - :return: - """ - - # Avoid failing if a developer inadvertently passes a - # non-NodeInstanceContext - if getattr(ctx, 'instance'): - if resource_name not in ctx.instance.runtime_properties.keys(): - ctx.instance.runtime_properties[resource_name] = {} - for key, value in payload.items(): - if key != 'user_data' and key != 'adminPass': - ctx.instance.runtime_properties[resource_name][key] = value - - -def get_relationships_by_relationship_type(ctx, type_name): - """ - Get cloudify relationships by relationship type. - Follows the inheritance tree. - - :param ctx: Cloudify NodeInstanceContext - :param type_name: desired relationship type derived - from cloudify.relationships.depends_on. - :return: list of RelationshipSubjectContext - """ - - return [rel for rel in ctx.instance.relationships if - type_name in rel.type_hierarchy] - - -def get_attribute_of_connected_nodes_by_relationship_type(ctx, - type_name, - attribute_name): - """ - Returns a list of OPENSTACK_ID_PROPERTY from a list of - Cloudify RelationshipSubjectContext. - - :param ctx: Cloudify NodeInstanceContext - :param type_name: desired relationship type derived - from cloudify.relationships.depends_on. 
- :param attribute_name: usually either - OPENSTACK_NAME_PROPERTY or OPENSTACK_ID_PROPERTY - :return: - """ - - return [rel.target.instance.runtime_properties[attribute_name] - for rel in get_relationships_by_relationship_type(ctx, type_name)] - - -def get_relationships_by_openstack_type(ctx, type_name): - return [rel for rel in ctx.instance.relationships - if rel.target.instance.runtime_properties.get( - OPENSTACK_TYPE_PROPERTY) == type_name] - - -def get_connected_nodes_by_openstack_type(ctx, type_name): - return [rel.target.node - for rel in get_relationships_by_openstack_type(ctx, type_name)] - - -def get_openstack_ids_of_connected_nodes_by_openstack_type(ctx, type_name): - return [rel.target.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - for rel in get_relationships_by_openstack_type(ctx, type_name) - ] - - -def get_openstack_names_of_connected_nodes_by_openstack_type(ctx, type_name): - return [rel.target.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] - for rel in get_relationships_by_openstack_type(ctx, type_name) - ] - - -def get_single_connected_node_by_openstack_type( - ctx, type_name, if_exists=False): - nodes = get_connected_nodes_by_openstack_type(ctx, type_name) - check = len(nodes) > 1 if if_exists else len(nodes) != 1 - if check: - raise NonRecoverableError( - 'Expected {0} one {1} node. got {2}'.format( - 'at most' if if_exists else 'exactly', type_name, len(nodes))) - return nodes[0] if nodes else None - - -def get_openstack_id_of_single_connected_node_by_openstack_type( - ctx, type_name, if_exists=False): - ids = get_openstack_ids_of_connected_nodes_by_openstack_type(ctx, - type_name) - check = len(ids) > 1 if if_exists else len(ids) != 1 - if check: - raise NonRecoverableError( - 'Expected {0} one {1} capability. got {2}'.format( - 'at most' if if_exists else 'exactly', type_name, len(ids))) - return ids[0] if ids else None - - -def get_resource_id(ctx, type_name): - if ctx.node.properties['resource_id']: - return ctx.node.properties['resource_id'] - return "{0}_{1}_{2}".format(type_name, ctx.deployment.id, ctx.instance.id) - - -def get_property(ctx, property_name, kwargs={}, default=None): - return kwargs.get( - property_name, - ctx.node.properties.get(property_name, default) - ) - - -def transform_resource_name(ctx, res): - - if isinstance(res, basestring): - res = {'name': res} - - if not isinstance(res, dict): - raise ValueError("transform_resource_name() expects either string or " - "dict as the first parameter") - - pfx = ctx.bootstrap_context.resources_prefix - - if not pfx: - return res['name'] - - name = res['name'] - res['name'] = pfx + name - - if name.startswith(pfx): - ctx.logger.warn("Prefixing resource '{0}' with '{1}' but it " - "already has this prefix".format(name, pfx)) - else: - ctx.logger.info("Transformed resource name '{0}' to '{1}'".format( - name, res['name'])) - - return res['name'] - - -def _get_resource_by_name_or_id_from_ctx(ctx, name_field_name, openstack_type, - sugared_client): - resource_id = ctx.instance.runtime_properties.get( - OPENSTACK_ID_PROPERTY, ctx.node.properties['resource_id']) - if not resource_id: - raise NonRecoverableError( - "Can't set '{0}' to True without supplying a value for " - "'resource_id'".format(USE_EXTERNAL_RESOURCE_PROPERTY)) - - return get_resource_by_name_or_id(resource_id, openstack_type, - sugared_client, True, name_field_name) - - -def get_resource_by_name_or_id( - resource_id, openstack_type, sugared_client, - raise_if_not_found=True, name_field_name='name'): - - # search for resource by name 
(or name-equivalent field) - search_param = {name_field_name: resource_id} - resource = sugared_client.cosmo_get_if_exists(openstack_type, - **search_param) - if not resource: - # fallback - search for resource by id - resource = sugared_client.cosmo_get_if_exists( - openstack_type, id=resource_id) - - if not resource and raise_if_not_found: - raise NonRecoverableError( - MISSING_RESOURCE_MESSAGE.format(openstack_type, resource_id)) - - return resource - - -def use_external_resource(ctx, sugared_client, openstack_type, - name_field_name='name'): - if not is_external_resource(ctx): - return None - try: - resource = _get_resource_by_name_or_id_from_ctx( - ctx, name_field_name, openstack_type, sugared_client) - except NonRecoverableError: - if is_create_if_missing(ctx): - ctx.instance.runtime_properties[CONDITIONALLY_CREATED] = True - return None - else: - raise - - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ - sugared_client.get_id_from_resource(resource) - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = openstack_type - ctx.instance.runtime_properties[OPENSTACK_RESOURCE_PROPERTY] = \ - resource if isinstance(resource, dict) else resource.to_dict() - - from openstack_plugin_common.floatingip import FLOATINGIP_OPENSTACK_TYPE - # store openstack name runtime property, unless it's a floating IP type, - # in which case the ip will be stored in the runtime properties instead. - if openstack_type != FLOATINGIP_OPENSTACK_TYPE: - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - sugared_client.get_name_from_resource(resource) - - ctx.logger.info('Using external resource {0}: {1}'.format( - openstack_type, ctx.instance.runtime_properties.get( - OPENSTACK_NAME_PROPERTY, ctx.node.properties['resource_id']))) - return resource - - -def validate_resource(ctx, sugared_client, openstack_type, - name_field_name='name'): - ctx.logger.debug('validating resource {0} (node {1})'.format( - openstack_type, ctx.node.id)) - - openstack_type_plural = sugared_client.cosmo_plural(openstack_type) - resource = None - - if is_external_resource(ctx): - - try: - # validate the resource truly exists - resource = _get_resource_by_name_or_id_from_ctx( - ctx, name_field_name, openstack_type, sugared_client) - ctx.logger.debug('OK: {0} {1} found in pool'.format( - openstack_type, ctx.node.properties['resource_id'])) - except NonRecoverableError as e: - if not is_create_if_missing(ctx): - ctx.logger.error('VALIDATION ERROR: ' + str(e)) - resource_list = list(sugared_client.cosmo_list(openstack_type)) - if resource_list: - ctx.logger.info('list of existing {0}: '.format( - openstack_type_plural)) - for resource in resource_list: - ctx.logger.info(' {0:>10} - {1}'.format( - sugared_client.get_id_from_resource(resource), - sugared_client.get_name_from_resource(resource))) - else: - ctx.logger.info('there are no existing {0}'.format( - openstack_type_plural)) - raise - if not resource: - if isinstance(sugared_client, NovaClientWithSugar): - # not checking quota for Nova resources due to a bug in Nova client - return - - # validate available quota for provisioning the resource - resource_list = list(sugared_client.cosmo_list(openstack_type)) - resource_amount = len(resource_list) - - resource_quota = sugared_client.get_quota(openstack_type) - - if resource_amount < resource_quota \ - or resource_quota == INFINITE_RESOURCE_QUOTA: - ctx.logger.debug( - 'OK: {0} (node {1}) can be created. 
provisioned {2}: {3}, ' - 'quota: {4}' - .format(openstack_type, ctx.node.id, openstack_type_plural, - resource_amount, resource_quota)) - else: - err = ('{0} (node {1}) cannot be created due to quota limitations.' - ' provisioned {2}: {3}, quota: {4}' - .format(openstack_type, ctx.node.id, openstack_type_plural, - resource_amount, resource_quota)) - ctx.logger.error('VALIDATION ERROR:' + err) - raise NonRecoverableError(err) - - -def delete_resource_and_runtime_properties(ctx, sugared_client, - runtime_properties_keys): - node_openstack_type = ctx.instance.runtime_properties[ - OPENSTACK_TYPE_PROPERTY] - - if not is_external_resource_not_conditionally_created(ctx): - ctx.logger.info('deleting {0}'.format(node_openstack_type)) - - sugared_client.cosmo_delete_resource( - node_openstack_type, - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY]) - else: - ctx.logger.info('not deleting {0} since an external {0} is ' - 'being used'.format(node_openstack_type)) - - delete_runtime_properties(ctx, runtime_properties_keys) - - -def is_external_resource(ctx): - return is_external_resource_by_properties(ctx.node.properties) - - -def is_external_resource_not_conditionally_created(ctx): - return is_external_resource_by_properties(ctx.node.properties) and \ - not ctx.instance.runtime_properties.get(CONDITIONALLY_CREATED) - - -def is_external_relationship_not_conditionally_created(ctx): - return is_external_resource_by_properties(ctx.source.node.properties) and \ - is_external_resource_by_properties(ctx.target.node.properties) and \ - not ctx.source.instance.runtime_properties.get( - CONDITIONALLY_CREATED) and not \ - ctx.target.instance.runtime_properties.get(CONDITIONALLY_CREATED) - - -def is_create_if_missing(ctx): - return is_create_if_missing_by_properties(ctx.node.properties) - - -def is_external_relationship(ctx): - return is_external_resource_by_properties(ctx.source.node.properties) and \ - is_external_resource_by_properties(ctx.target.node.properties) - - -def is_external_resource_by_properties(properties): - return USE_EXTERNAL_RESOURCE_PROPERTY in properties and \ - properties[USE_EXTERNAL_RESOURCE_PROPERTY] - - -def is_create_if_missing_by_properties(properties): - return CREATE_IF_MISSING_PROPERTY in properties and \ - properties[CREATE_IF_MISSING_PROPERTY] - - -def delete_runtime_properties(ctx, runtime_properties_keys): - for runtime_prop_key in runtime_properties_keys: - if runtime_prop_key in ctx.instance.runtime_properties: - del ctx.instance.runtime_properties[runtime_prop_key] - - -def validate_ip_or_range_syntax(ctx, address, is_range=True): - range_suffix = ' range' if is_range else '' - ctx.logger.debug('checking whether {0} is a valid address{1}...' 
- .format(address, range_suffix)) - try: - IP(address) - ctx.logger.debug('OK:' - '{0} is a valid address{1}.'.format(address, - range_suffix)) - except ValueError as e: - err = ('{0} is not a valid address{1}; {2}'.format( - address, range_suffix, e.message)) - ctx.logger.error('VALIDATION ERROR:' + err) - raise NonRecoverableError(err) - - -def get_openstack_id(ctx): - return ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] - - -def get_openstack_type(ctx): - return ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] - - -def create_object_dict(ctx, object_name, args, object_dict=None): - object_dict = object_dict if object_dict is not None else {} - object_dict['name'] = get_resource_id(ctx, object_name) - object_dict.update(ctx.node.properties[object_name], **args) - transform_resource_name(ctx, object_dict) - return object_dict - - -def add_list_to_runtime_properties(ctx, openstack_type_name, object_list): - - objects = [] - - for obj in object_list: - if type(obj) not in [str, dict]: - obj = obj.to_dict() - objects.append(obj) - - ctx.instance.runtime_properties[openstack_type_name + '_list'] \ - = objects - return objects - - -def set_openstack_runtime_properties(ctx, openstack_object, openstack_type): - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ - openstack_object.id - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - openstack_type - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - openstack_object.name - - -def set_neutron_runtime_properties(ctx, openstack_object, openstack_type): - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ - openstack_object['id'] - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - openstack_type - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - openstack_object['name'] - - -class Config(object): - - OPENSTACK_CONFIG_PATH_ENV_VAR = 'OPENSTACK_CONFIG_PATH' - OPENSTACK_CONFIG_PATH_DEFAULT_PATH = '~/openstack_config.json' - OPENSTACK_ENV_VAR_PREFIX = 'OS_' - OPENSTACK_SUPPORTED_ENV_VARS = { - 'OS_AUTH_URL', 'OS_USERNAME', 'OS_PASSWORD', 'OS_TENANT_NAME', - 'OS_REGION_NAME', 'OS_PROJECT_ID', 'OS_PROJECT_NAME', - 'OS_USER_DOMAIN_NAME', 'OS_PROJECT_DOMAIN_NAME' - } - - @classmethod - def get(cls): - static_config = cls._build_config_from_env_variables() - env_name = cls.OPENSTACK_CONFIG_PATH_ENV_VAR - default_location_tpl = cls.OPENSTACK_CONFIG_PATH_DEFAULT_PATH - default_location = os.path.expanduser(default_location_tpl) - config_path = os.getenv(env_name, default_location) - try: - with open(config_path) as f: - cls.update_config(static_config, json.loads(f.read())) - except IOError: - pass - return static_config - - @classmethod - def _build_config_from_env_variables(cls): - return {v.lstrip(cls.OPENSTACK_ENV_VAR_PREFIX).lower(): os.environ[v] - for v in cls.OPENSTACK_SUPPORTED_ENV_VARS if v in os.environ} - - @staticmethod - def update_config(overridden_cfg, overriding_cfg): - """ this method is like dict.update() only that it doesn't override - with (or set new) empty values (e.g. 
empty string) """ - for k, v in overriding_cfg.iteritems(): - if v: - overridden_cfg[k] = v - - -class OpenStackClient(object): - - COMMON = {'username', 'password', 'auth_url'} - AUTH_SETS = [ - COMMON | {'tenant_name'}, - COMMON | {'project_id', 'user_domain_name'}, - COMMON | {'project_id', 'project_name', 'user_domain_name'}, - COMMON | {'project_name', 'user_domain_name', 'project_domain_name'}, - ] - - NON_AUTH_ITEMS = ['region', 'insecure', - 'ca_cert', 'nova_url', - 'neutron_url', 'custom_configuration', 'logging'] - - OPTIONAL_AUTH_PARAMS = {AUTH_PARAM_INSECURE, AUTH_PARM_CA_CERT} - - def __init__(self, client_name, client_class, config=None, *args, **kw): - cfg = Config.get() - if config: - Config.update_config(cfg, config) - - # This check to make sure that blueprint openstack config - # contains all the required auth params + any non-auth param - if set(config.keys())\ - in self.AUTH_SETS and config.keys() in self.NON_AUTH_ITEMS: - - # Check if there is any value exists on ``cfg`` - # that does not exist on ``config`` then these extra params - # should be removed to prevent any merging conflicts - removed_keys = [] - for k, v in cfg.iteritems(): - if k not in config: - removed_keys.append(k) - - for key in removed_keys: - del cfg[key] - - v3 = '/v3' in cfg['auth_url'] - # Newer libraries expect the region key to be `region_name`, not - # `region`. - region = cfg.pop('region', None) - if v3 and region: - cfg['region_name'] = region - - # Calculate effective logging policy. - # Note that we don't use dict's update() function at the dict's - # root because it will overwrite nested dicts. - - logging_config = cfg.pop('logging', dict()) - use_cfy_logger = logging_config.get(KEY_USE_CFY_LOGGER) - groups_config = logging_config.get(KEY_GROUPS, {}) - loggers_config = logging_config.get(KEY_LOGGERS, {}) - - final_logging_cfg = copy.deepcopy(DEFAULT_LOGGING_CONFIG) - - if use_cfy_logger: - final_logging_cfg[KEY_USE_CFY_LOGGER] = use_cfy_logger - else: - use_cfy_logger = final_logging_cfg[KEY_USE_CFY_LOGGER] - final_logging_cfg[KEY_GROUPS].update(groups_config) - final_logging_cfg[KEY_LOGGERS].update(loggers_config) - - # Prepare mapping between logger names and logging levels. - configured_loggers = {v: final_logging_cfg[KEY_GROUPS][k] for - k, values in LOGGING_GROUPS.items() for v - in values} - configured_loggers.update(final_logging_cfg[KEY_LOGGERS]) - - ctx_log_handler = CloudifyCtxLogHandler(ctx) if use_cfy_logger \ - else None - - for logger_name, logger_level in configured_loggers.items(): - logger = logging.getLogger(logger_name) - if ctx_log_handler: - logger.addHandler(ctx_log_handler) - logger.setLevel(logger_level) - - cfg = self._merge_custom_configuration(cfg, client_name) - - auth_params, client_params = OpenStackClient._split_config(cfg) - OpenStackClient._validate_auth_params(auth_params) - - if v3: - # keystone v3 complains if these aren't set. - for key in 'user_domain_name', 'project_domain_name': - auth_params.setdefault(key, 'default') - - client_params['session'] = self._authenticate(auth_params) - self._client = client_class(**client_params) - - @classmethod - def _validate_auth_params(cls, params): - if set(params.keys()) - cls.OPTIONAL_AUTH_PARAMS in cls.AUTH_SETS: - return - - def set2str(s): - return '({})'.format(', '.join(sorted(s))) - - received_params = set2str(params) - valid_auth_sets = map(set2str, cls.AUTH_SETS) - raise NonRecoverableError( - "{} is not valid set of auth params. 
Expected to find parameters " - "either as environment variables, in a JSON file (at either a " - "path which is set under the environment variable {} or at the " - "default location {}), or as nested properties under an " - "'{}' property. Valid auth param sets are: {}." - .format(received_params, - Config.OPENSTACK_CONFIG_PATH_ENV_VAR, - Config.OPENSTACK_CONFIG_PATH_DEFAULT_PATH, - CONFIG_PROPERTY, - ', '.join(valid_auth_sets))) - - @staticmethod - def _merge_custom_configuration(cfg, client_name): - config = cfg.copy() - - mapping = { - 'nova_url': 'nova_client', - 'neutron_url': 'neutron_client' - } - for key in 'nova_url', 'neutron_url': - val = config.pop(key, None) - if val is not None: - ctx.logger.warn( - "'{}' property is deprecated. Use `custom_configuration" - ".{}.endpoint_override` instead.".format( - key, mapping[key])) - if mapping.get(key, None) == client_name: - config['endpoint_override'] = val - - if 'custom_configuration' in cfg: - del config['custom_configuration'] - config.update(cfg['custom_configuration'].get(client_name, {})) - return config - - @classmethod - def _split_config(cls, cfg): - all = reduce(lambda x, y: x | y, cls.AUTH_SETS) - all |= cls.OPTIONAL_AUTH_PARAMS - - auth, misc = {}, {} - for param, value in cfg.items(): - if param in all: - auth[param] = value - else: - misc[param] = value - return auth, misc - - @staticmethod - def _authenticate(cfg): - # 'verify' will contain one of the following: - # - # True: perform certificate validation against the underlying - # CA certs bundle (note: this is the certs bundle used - # by the 'requests' library, which is different from the - # OS-provided one). - # - # To get that, specify 'insecure: True'. - # - # False: disable certificate validation altogether. - # - # To get that, specify 'insecure: False' (or any value - # other than True). - # - # Any other string: path to the CA cert (or bundle) to verify - # against. - # - # To get that, specify 'ca_cert: path_to_file' - # and ensure 'insecure' is NOT specified. - verify = True - if AUTH_PARAM_INSECURE in cfg: - cfg = cfg.copy() - # NOTE: Next line will evaluate to False only when insecure is set - # to True. Any other value (string etc.) will force verify to True. - # This is done on purpose, since we do not wish to use insecure - # connection by mistake. - # Update: We are adding handling for casting 'True' or 'true' as a - # bool because if this value is set via an instrinsic function - # it will always be a string. 
- cfg_insecure = cfg[AUTH_PARAM_INSECURE] - if isinstance(cfg_insecure, basestring) and \ - cfg_insecure.capitalize() == 'True': - cfg[AUTH_PARAM_INSECURE] = True - verify = not (cfg[AUTH_PARAM_INSECURE] is True) - del cfg[AUTH_PARAM_INSECURE] - elif AUTH_PARM_CA_CERT in cfg: - cfg = cfg.copy() - verify = cfg[AUTH_PARM_CA_CERT] - del cfg[AUTH_PARM_CA_CERT] - - loader = loading.get_plugin_loader("password") - auth = loader.load_from_options(**cfg) - sess = session.Session(auth=auth, verify=verify) - return sess - - # Proxy any unknown call to base client - def __getattr__(self, attr): - return getattr(self._client, attr) - - # Sugar, common to all clients - def cosmo_plural(self, obj_type_single): - return obj_type_single + 's' - - def cosmo_get_named(self, obj_type_single, name, **kw): - return self.cosmo_get(obj_type_single, name=name, **kw) - - def cosmo_get(self, obj_type_single, **kw): - return self._cosmo_get(obj_type_single, False, **kw) - - def cosmo_get_if_exists(self, obj_type_single, **kw): - return self._cosmo_get(obj_type_single, True, **kw) - - def _cosmo_get(self, obj_type_single, if_exists, **kw): - ls = list(self.cosmo_list(obj_type_single, **kw)) - check = len(ls) > 1 if if_exists else len(ls) != 1 - if check: - raise NonRecoverableError( - "Expected {0} one object of type {1} " - "with match {2} but there are {3}".format( - 'at most' if if_exists else 'exactly', - obj_type_single, kw, len(ls))) - return ls[0] if ls else None - - -class GlanceClient(OpenStackClient): - - # Can't glance_url be figured out from keystone - REQUIRED_CONFIG_PARAMS = \ - ['username', 'password', 'tenant_name', 'auth_url'] - - def connect(self, cfg): - loader = loading.get_plugin_loader('password') - auth = loader.load_from_options( - auth_url=cfg['auth_url'], - username=cfg['username'], - password=cfg['password'], - tenant_name=cfg['tenant_name']) - sess = session.Session(auth=auth) - - client_kwargs = dict( - session=sess, - ) - if cfg.get('glance_url'): - client_kwargs['endpoint'] = cfg['glance_url'] - - return GlanceClientWithSugar(**client_kwargs) - - -# Decorators -def _find_instanceof_in_kw(cls, kw): - ret = [v for v in kw.values() if isinstance(v, cls)] - if not ret: - return None - if len(ret) > 1: - raise NonRecoverableError( - "Expected to find exactly one instance of {0} in " - "kwargs but found {1}".format(cls, len(ret))) - return ret[0] - - -def _find_context_in_kw(kw): - return _find_instanceof_in_kw(cloudify.context.CloudifyContext, kw) - - -def with_neutron_client(f): - @wraps(f) - def wrapper(*args, **kw): - _handle_kw('neutron_client', NeutronClientWithSugar, kw) - - try: - return f(*args, **kw) - except neutron_exceptions.NeutronClientException as e: - if e.status_code in _non_recoverable_error_codes: - _re_raise(e, recoverable=False, status_code=e.status_code) - else: - raise - return wrapper - - -def with_nova_client(f): - @wraps(f) - def wrapper(*args, **kw): - _handle_kw('nova_client', NovaClientWithSugar, kw) - - try: - return f(*args, **kw) - except nova_exceptions.OverLimit as e: - _re_raise(e, recoverable=True, retry_after=e.retry_after) - except nova_exceptions.ClientException as e: - if e.code in _non_recoverable_error_codes: - _re_raise(e, recoverable=False, status_code=e.code) - else: - raise - return wrapper - - -def with_cinder_client(f): - @wraps(f) - def wrapper(*args, **kw): - _handle_kw('cinder_client', CinderClientWithSugar, kw) - - try: - return f(*args, **kw) - except cinder_exceptions.ClientException as e: - if e.code in _non_recoverable_error_codes: - 
_re_raise(e, recoverable=False, status_code=e.code) - else: - raise - return wrapper - - -def with_glance_client(f): - @wraps(f) - def wrapper(*args, **kw): - _handle_kw('glance_client', GlanceClientWithSugar, kw) - - try: - return f(*args, **kw) - except glance_exceptions.ClientException as e: - if e.code in _non_recoverable_error_codes: - _re_raise(e, recoverable=False, status_code=e.code) - else: - raise - return wrapper - - -def with_keystone_client(f): - @wraps(f) - def wrapper(*args, **kw): - _handle_kw('keystone_client', KeystoneClientWithSugar, kw) - - try: - return f(*args, **kw) - except keystone_exceptions.HTTPError as e: - if e.http_status in _non_recoverable_error_codes: - _re_raise(e, recoverable=False, status_code=e.http_status) - else: - raise - except keystone_exceptions.ClientException as e: - _re_raise(e, recoverable=False) - return wrapper - - -def _handle_kw(client_name, client_class, kw): - - _ctx = _find_context_in_kw(kw) or ctx - if _ctx.type == context.NODE_INSTANCE: - config = _ctx.node.properties.get(CONFIG_PROPERTY) - resource_id = kw.pop( - 'resource_id', _ctx.node.properties.get('resource_id')) - rt_config = _ctx.instance.runtime_properties.get( - CONFIG_RUNTIME_PROPERTY) - if resource_id and OPENSTACK_ID_PROPERTY not in \ - _ctx.instance.runtime_properties: - _ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ - resource_id - elif _ctx.type == context.RELATIONSHIP_INSTANCE: - config = _ctx.source.node.properties.get(CONFIG_PROPERTY) - rt_config = _ctx.source.instance.runtime_properties.get( - CONFIG_RUNTIME_PROPERTY) - resource_id = kw.pop( - 'resource_id', _ctx.source.node.properties.get('resource_id')) - if not config: - config = _ctx.target.node.properties.get(CONFIG_PROPERTY) - rt_config = _ctx.target.instance.runtime_properties.get( - CONFIG_RUNTIME_PROPERTY) - if resource_id and OPENSTACK_ID_PROPERTY not in \ - _ctx.source.instance.runtime_properties: - _ctx.source.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = \ - resource_id - else: - config = None - rt_config = None - - # Overlay with configuration from runtime property, if any. - if rt_config: - if config: - config = config.copy() - config.update(rt_config) - else: - config = rt_config - - if client_name in kw: - return - if CONFIG_INPUT in kw: - if config: - config = config.copy() - config.update(kw[CONFIG_INPUT]) - else: - config = kw[CONFIG_INPUT] - kw[client_name] = client_class(config=config) - - -_non_recoverable_error_codes = [400, 401, 403, 404, 409] - - -def _re_raise(e, recoverable, retry_after=None, status_code=None): - exc_type, exc, traceback = sys.exc_info() - message = e.message - if status_code is not None: - message = '{0} [status_code={1}]'.format(message, status_code) - if recoverable: - if retry_after == 0: - retry_after = None - raise RecoverableError( - message=message, - retry_after=retry_after), None, traceback - else: - raise NonRecoverableError(message), None, traceback - - -# Sugar for clients - -class NovaClientWithSugar(OpenStackClient): - - def __init__(self, *args, **kw): - config = kw['config'] - if config.get('nova_url'): - config['endpoint_override'] = config.pop('nova_url') - - try: - nova_client_version = \ - config['custom_configuration']['nova_client'].pop( - 'version') - except KeyError: - nova_client_version = '2' - - # In case someone provides an int. 
- if not isinstance(nova_client_version, basestring): - nova_client_version = str(nova_client_version) - - super(NovaClientWithSugar, self).__init__( - 'nova_client', - partial(nova_client.Client, nova_client_version), *args, **kw) - - def cosmo_list(self, obj_type_single, **kw): - """ Sugar for xxx.findall() - not using xxx.list() because findall - can receive filtering parameters, and it's common for all types""" - obj_type_plural = self._get_nova_field_name_for_type(obj_type_single) - for obj in getattr(self, obj_type_plural).findall(**kw): - yield obj - - def cosmo_delete_resource(self, obj_type_single, obj_id): - obj_type_plural = self._get_nova_field_name_for_type(obj_type_single) - getattr(self, obj_type_plural).delete(obj_id) - - def get_id_from_resource(self, resource): - return resource.id - - def get_name_from_resource(self, resource): - return resource.name - - def get_quota(self, obj_type_single): - raise RuntimeError( - 'Retrieving quotas from Nova service is currently unsupported ' - 'due to a bug in Nova python client') - - # we're already authenticated, but the following call will make - # 'service_catalog' available under 'client', through which we can - # extract the tenant_id (Note that self.client.tenant_id might be - # None if project_id (AKA tenant_name) was used instead; However the - # actual tenant_id must be used to retrieve the quotas) - self.client.authenticate() - tenant_id = self.client.service_catalog.get_tenant_id() - quotas = self.quotas.get(tenant_id) - return getattr(quotas, self.cosmo_plural(obj_type_single)) - - def _get_nova_field_name_for_type(self, obj_type_single): - from openstack_plugin_common.floatingip import \ - FLOATINGIP_OPENSTACK_TYPE - if obj_type_single == FLOATINGIP_OPENSTACK_TYPE: - # since we use the same 'openstack type' property value for both - # neutron and nova floating-ips, this adjustment must be made - # for nova client, as fields names differ between the two clients - obj_type_single = 'floating_ip' - return self.cosmo_plural(obj_type_single) - - -class NeutronClientWithSugar(OpenStackClient): - - def __init__(self, *args, **kw): - super(NeutronClientWithSugar, self).__init__( - 'neutron_client', neutron_client.Client, *args, **kw) - - def cosmo_list(self, obj_type_single, **kw): - """ Sugar for list_XXXs()['XXXs'] """ - obj_type_plural = self.cosmo_plural(obj_type_single) - for obj in getattr(self, 'list_' + obj_type_plural)(**kw)[ - obj_type_plural]: - yield obj - - def cosmo_delete_resource(self, obj_type_single, obj_id): - getattr(self, 'delete_' + obj_type_single)(obj_id) - - def get_id_from_resource(self, resource): - return resource['id'] - - def get_name_from_resource(self, resource): - return resource['name'] - - def get_quota(self, obj_type_single): - tenant_id = self.get_quotas_tenant()['tenant']['tenant_id'] - quotas = self.show_quota(tenant_id)['quota'] - return quotas[obj_type_single] - - def cosmo_list_prefixed(self, obj_type_single, name_prefix): - for obj in self.cosmo_list(obj_type_single): - if obj['name'].startswith(name_prefix): - yield obj - - def cosmo_delete_prefixed(self, name_prefix): - # Cleanup all neutron.list_XXX() objects with names starting - # with self.name_prefix - for obj_type_single in 'port', 'router', 'network', 'subnet',\ - 'security_group': - for obj in self.cosmo_list_prefixed(obj_type_single, name_prefix): - if obj_type_single == 'router': - ports = self.cosmo_list('port', device_id=obj['id']) - for port in ports: - try: - self.remove_interface_router( - port['device_id'], - 
{'port_id': port['id']}) - except neutron_exceptions.NeutronClientException: - pass - getattr(self, 'delete_' + obj_type_single)(obj['id']) - - def cosmo_find_external_net(self): - """ For tests of floating IP """ - nets = self.list_networks()['networks'] - ls = [net for net in nets if net.get('router:external')] - if len(ls) != 1: - raise NonRecoverableError( - "Expected exactly one external network but found {0}".format( - len(ls))) - return ls[0] - - -class CinderClientWithSugar(OpenStackClient): - - def __init__(self, *args, **kw): - super(CinderClientWithSugar, self).__init__( - 'cinder_client', partial(cinder_client.Client, '2'), *args, **kw) - - def cosmo_list(self, obj_type_single, **kw): - obj_type_plural = self.cosmo_plural(obj_type_single) - for obj in getattr(self, obj_type_plural).findall(**kw): - yield obj - - def cosmo_delete_resource(self, obj_type_single, obj_id): - obj_type_plural = self.cosmo_plural(obj_type_single) - getattr(self, obj_type_plural).delete(obj_id) - - def get_id_from_resource(self, resource): - return resource.id - - def get_name_from_resource(self, resource): - return resource.name - - def get_quota(self, obj_type_single): - # we're already authenticated, but the following call will make - # 'service_catalog' available under 'client', through which we can - # extract the tenant_id (Note that self.client.tenant_id might be - # None if project_id (AKA tenant_name) was used instead; However the - # actual tenant_id must be used to retrieve the quotas) - self.client.authenticate() - project_id = self.client.session.get_project_id() - quotas = self.quotas.get(project_id) - return getattr(quotas, self.cosmo_plural(obj_type_single)) - - -class KeystoneClientWithSugar(OpenStackClient): - # keystone does not have resource quota - KEYSTONE_INFINITE_RESOURCE_QUOTA = 10**9 - - def __init__(self, *args, **kw): - super(KeystoneClientWithSugar, self).__init__( - 'keystone_client', keystone_client.Client, *args, **kw) - - def cosmo_list(self, obj_type_single, **kw): - obj_type_plural = self.cosmo_plural(obj_type_single) - for obj in getattr(self, obj_type_plural).list(**kw): - yield obj - - def cosmo_delete_resource(self, obj_type_single, obj_id): - obj_type_plural = self.cosmo_plural(obj_type_single) - getattr(self, obj_type_plural).delete(obj_id) - - def get_id_from_resource(self, resource): - return resource.id - - def get_name_from_resource(self, resource): - return resource.name - - def get_quota(self, obj_type_single): - return self.KEYSTONE_INFINITE_RESOURCE_QUOTA - - -class GlanceClientWithSugar(OpenStackClient): - GLANCE_INIFINITE_RESOURCE_QUOTA = 10**9 - - def __init__(self, *args, **kw): - super(GlanceClientWithSugar, self).__init__( - 'glance_client', partial(glance_client.Client, '2'), *args, **kw) - - def cosmo_list(self, obj_type_single, **kw): - obj_type_plural = self.cosmo_plural(obj_type_single) - return getattr(self, obj_type_plural).list(filters=kw) - - def cosmo_delete_resource(self, obj_type_single, obj_id): - obj_type_plural = self.cosmo_plural(obj_type_single) - getattr(self, obj_type_plural).delete(obj_id) - - def get_id_from_resource(self, resource): - return resource.id - - def get_name_from_resource(self, resource): - return resource.name - - def get_quota(self, obj_type_single): - return self.GLANCE_INIFINITE_RESOURCE_QUOTA diff --git a/openstack_plugin_common/floatingip.py b/openstack_plugin_common/floatingip.py deleted file mode 100644 index 77125c21..00000000 --- a/openstack_plugin_common/floatingip.py +++ /dev/null @@ -1,90 +0,0 @@ 
-######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -from cloudify import ctx -from cloudify.exceptions import RecoverableError -from openstack_plugin_common import ( - delete_resource_and_runtime_properties, - use_external_resource, - validate_resource, - COMMON_RUNTIME_PROPERTIES_KEYS, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY) - -FLOATINGIP_OPENSTACK_TYPE = 'floatingip' - -# Runtime properties -IP_ADDRESS_PROPERTY = 'floating_ip_address' # the actual ip address -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS + \ - [IP_ADDRESS_PROPERTY] - - -def use_external_floatingip(client, ip_field_name, ext_fip_ip_extractor): - external_fip = use_external_resource( - ctx, client, FLOATINGIP_OPENSTACK_TYPE, ip_field_name) - if external_fip: - if not ctx.node.properties['allow_reallocation'] \ - and external_fip['status'] == 'ACTIVE': - raise RecoverableError( - 'Floating IP address {0} is already associated'.format( - external_fip['floating_ip_address']) - ) - ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = \ - ext_fip_ip_extractor(external_fip) - return True - - return False - - -def set_floatingip_runtime_properties(fip_id, ip_address): - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] = fip_id - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] = \ - FLOATINGIP_OPENSTACK_TYPE - ctx.instance.runtime_properties[IP_ADDRESS_PROPERTY] = ip_address - - -def delete_floatingip(client, **kwargs): - delete_resource_and_runtime_properties(ctx, client, - RUNTIME_PROPERTIES_KEYS) - - -def floatingip_creation_validation(client, ip_field_name, **kwargs): - validate_resource(ctx, client, FLOATINGIP_OPENSTACK_TYPE, - ip_field_name) - - -def get_server_floating_ip(neutron_client, server_id): - - floating_ips = neutron_client.list_floatingips() - - floating_ips = floating_ips.get('floatingips') - if not floating_ips: - return None - - for floating_ip in floating_ips: - port_id = floating_ip.get('port_id') - if not port_id: - # this floating ip is not attached to any port - continue - - port = neutron_client.show_port(port_id)['port'] - device_id = port.get('device_id') - if not device_id: - # this port is not attached to any server - continue - - if server_id == device_id: - return floating_ip - return None diff --git a/openstack_plugin_common/security_group.py b/openstack_plugin_common/security_group.py deleted file mode 100644 index 0fa21aa1..00000000 --- a/openstack_plugin_common/security_group.py +++ /dev/null @@ -1,148 +0,0 @@ -######### -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import copy -import re - -from cloudify import ctx -from cloudify.exceptions import NonRecoverableError - -from openstack_plugin_common import ( - get_resource_id, - use_external_resource, - delete_resource_and_runtime_properties, - validate_resource, - validate_ip_or_range_syntax, - OPENSTACK_ID_PROPERTY, - OPENSTACK_TYPE_PROPERTY, - OPENSTACK_NAME_PROPERTY, - COMMON_RUNTIME_PROPERTIES_KEYS -) - -SECURITY_GROUP_OPENSTACK_TYPE = 'security_group' - -# Runtime properties -RUNTIME_PROPERTIES_KEYS = COMMON_RUNTIME_PROPERTIES_KEYS - -NODE_NAME_RE = re.compile('^(.*)_.*$') # Anything before last underscore - - -def build_sg_data(args=None): - security_group = { - 'description': None, - 'name': get_resource_id(ctx, SECURITY_GROUP_OPENSTACK_TYPE), - } - - args = args or {} - security_group.update(ctx.node.properties['security_group'], **args) - - return security_group - - -def process_rules(client, sgr_default_values, cidr_field_name, - remote_group_field_name, min_port_field_name, - max_port_field_name): - rules_to_apply = ctx.node.properties['rules'] - security_group_rules = [] - for rule in rules_to_apply: - security_group_rules.append( - _process_rule(rule, client, sgr_default_values, cidr_field_name, - remote_group_field_name, min_port_field_name, - max_port_field_name)) - - return security_group_rules - - -def use_external_sg(client): - return use_external_resource(ctx, client, - SECURITY_GROUP_OPENSTACK_TYPE) - - -def set_sg_runtime_properties(sg, client): - ctx.instance.runtime_properties[OPENSTACK_ID_PROPERTY] =\ - client.get_id_from_resource(sg) - ctx.instance.runtime_properties[OPENSTACK_TYPE_PROPERTY] =\ - SECURITY_GROUP_OPENSTACK_TYPE - ctx.instance.runtime_properties[OPENSTACK_NAME_PROPERTY] = \ - client.get_name_from_resource(sg) - - -def delete_sg(client, **kwargs): - delete_resource_and_runtime_properties(ctx, client, - RUNTIME_PROPERTIES_KEYS) - - -def sg_creation_validation(client, cidr_field_name, **kwargs): - validate_resource(ctx, client, SECURITY_GROUP_OPENSTACK_TYPE) - - ctx.logger.debug('validating CIDR for rules with a {0} field'.format( - cidr_field_name)) - for rule in ctx.node.properties['rules']: - if cidr_field_name in rule: - validate_ip_or_range_syntax(ctx, rule[cidr_field_name]) - - -def _process_rule(rule, client, sgr_default_values, cidr_field_name, - remote_group_field_name, min_port_field_name, - max_port_field_name): - ctx.logger.debug( - "Security group rule before transformations: {0}".format(rule)) - - sgr = copy.deepcopy(sgr_default_values) - if 'port' in rule: - rule[min_port_field_name] = rule['port'] - rule[max_port_field_name] = rule['port'] - del rule['port'] - sgr.update(rule) - - if (remote_group_field_name in sgr) and sgr[remote_group_field_name]: - sgr[cidr_field_name] = None - elif ('remote_group_node' in sgr) and sgr['remote_group_node']: - _, remote_group_node = _capabilities_of_node_named( - sgr['remote_group_node']) - sgr[remote_group_field_name] = remote_group_node[OPENSTACK_ID_PROPERTY] - del sgr['remote_group_node'] - sgr[cidr_field_name] = None - elif ('remote_group_name' in sgr) and sgr['remote_group_name']: - 
sgr[remote_group_field_name] = \ - client.get_id_from_resource( - client.cosmo_get_named( - SECURITY_GROUP_OPENSTACK_TYPE, sgr['remote_group_name'])) - del sgr['remote_group_name'] - sgr[cidr_field_name] = None - - ctx.logger.debug( - "Security group rule after transformations: {0}".format(sgr)) - return sgr - - -def _capabilities_of_node_named(node_name): - result = None - caps = ctx.capabilities.get_all() - for node_id in caps: - match = NODE_NAME_RE.match(node_id) - if match: - candidate_node_name = match.group(1) - if candidate_node_name == node_name: - if result: - raise NonRecoverableError( - "More than one node named '{0}' " - "in capabilities".format(node_name)) - result = (node_id, caps[node_id]) - if not result: - raise NonRecoverableError( - "Could not find node named '{0}' " - "in capabilities".format(node_name)) - return result diff --git a/openstack_plugin_common/tests/openstack_client_tests.py b/openstack_plugin_common/tests/openstack_client_tests.py deleted file mode 100644 index b833582f..00000000 --- a/openstack_plugin_common/tests/openstack_client_tests.py +++ /dev/null @@ -1,881 +0,0 @@ -######## -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. - -import os -import unittest -import tempfile -import json -import __builtin__ as builtins - -import mock -from cloudify.exceptions import NonRecoverableError - -from cloudify.mocks import MockCloudifyContext -import openstack_plugin_common as common - - -class ConfigTests(unittest.TestCase): - - @mock.patch.dict('os.environ', clear=True) - def test__build_config_from_env_variables_empty(self): - cfg = common.Config._build_config_from_env_variables() - self.assertEqual({}, cfg) - - @mock.patch.dict('os.environ', clear=True, - OS_AUTH_URL='test_url') - def test__build_config_from_env_variables_single(self): - cfg = common.Config._build_config_from_env_variables() - self.assertEqual({'auth_url': 'test_url'}, cfg) - - @mock.patch.dict('os.environ', clear=True, - OS_AUTH_URL='test_url', - OS_PASSWORD='pass', - OS_REGION_NAME='region') - def test__build_config_from_env_variables_multiple(self): - cfg = common.Config._build_config_from_env_variables() - self.assertEqual({ - 'auth_url': 'test_url', - 'password': 'pass', - 'region_name': 'region', - }, cfg) - - @mock.patch.dict('os.environ', clear=True, - OS_INVALID='invalid', - PASSWORD='pass', - os_region_name='region') - def test__build_config_from_env_variables_all_ignored(self): - cfg = common.Config._build_config_from_env_variables() - self.assertEqual({}, cfg) - - @mock.patch.dict('os.environ', clear=True, - OS_AUTH_URL='test_url', - OS_PASSWORD='pass', - OS_REGION_NAME='region', - OS_INVALID='invalid', - PASSWORD='pass', - os_region_name='region') - def test__build_config_from_env_variables_extract_valid(self): - cfg = common.Config._build_config_from_env_variables() - self.assertEqual({ - 'auth_url': 'test_url', - 'password': 'pass', - 'region_name': 'region', - }, cfg) - - def 
test_update_config_empty_target(self): - target = {} - override = {'k1': 'u1'} - result = override.copy() - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_empty_override(self): - target = {'k1': 'v1'} - override = {} - result = target.copy() - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_disjoint_configs(self): - target = {'k1': 'v1'} - override = {'k2': 'u2'} - result = target.copy() - result.update(override) - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_do_not_remove_empty_from_target(self): - target = {'k1': ''} - override = {} - result = target.copy() - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_no_empty_in_override(self): - target = {'k1': 'v1', 'k2': 'v2'} - override = {'k1': 'u2'} - result = target.copy() - result.update(override) - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_all_empty_in_override(self): - target = {'k1': '', 'k2': 'v2'} - override = {'k1': '', 'k3': ''} - result = target.copy() - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - def test_update_config_misc(self): - target = {'k1': 'v1', 'k2': 'v2'} - override = {'k1': '', 'k2': 'u2', 'k3': '', 'k4': 'u4'} - result = {'k1': 'v1', 'k2': 'u2', 'k4': 'u4'} - - common.Config.update_config(target, override) - self.assertEqual(result, target) - - @mock.patch.object(common.Config, 'update_config') - @mock.patch.object(common.Config, '_build_config_from_env_variables', - return_value={}) - @mock.patch.dict('os.environ', clear=True, - values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: - '/this/should/not/exist.json'}) - def test_get_missing_static_config_missing_file(self, from_env, update): - cfg = common.Config.get() - self.assertEqual({}, cfg) - from_env.assert_called_once_with() - update.assert_not_called() - - @mock.patch.object(common.Config, 'update_config') - @mock.patch.object(common.Config, '_build_config_from_env_variables', - return_value={}) - def test_get_empty_static_config_present_file(self, from_env, update): - file_cfg = {'k1': 'v1', 'k2': 'v2'} - env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR - file = tempfile.NamedTemporaryFile(delete=False) - json.dump(file_cfg, file) - file.close() - - with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): - common.Config.get() - - os.unlink(file.name) - from_env.assert_called_once_with() - update.assert_called_once_with({}, file_cfg) - - @mock.patch.object(common.Config, 'update_config') - @mock.patch.object(common.Config, '_build_config_from_env_variables', - return_value={'k1': 'v1'}) - def test_get_present_static_config_empty_file(self, from_env, update): - file_cfg = {} - env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR - file = tempfile.NamedTemporaryFile(delete=False) - json.dump(file_cfg, file) - file.close() - - with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): - common.Config.get() - - os.unlink(file.name) - from_env.assert_called_once_with() - update.assert_called_once_with({'k1': 'v1'}, file_cfg) - - @mock.patch.object(common.Config, 'update_config') - @mock.patch.object(common.Config, '_build_config_from_env_variables', - return_value={'k1': 'v1'}) - @mock.patch.dict('os.environ', clear=True, - values={common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: - 
'/this/should/not/exist.json'}) - def test_get_present_static_config_missing_file(self, from_env, update): - cfg = common.Config.get() - self.assertEqual({'k1': 'v1'}, cfg) - from_env.assert_called_once_with() - update.assert_not_called() - - @mock.patch.object(common.Config, 'update_config') - @mock.patch.object(common.Config, '_build_config_from_env_variables', - return_value={'k1': 'v1'}) - def test_get_all_present(self, from_env, update): - file_cfg = {'k2': 'u2'} - env_var = common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR - file = tempfile.NamedTemporaryFile(delete=False) - json.dump(file_cfg, file) - file.close() - - with mock.patch.dict('os.environ', {env_var: file.name}, clear=True): - common.Config.get() - - os.unlink(file.name) - from_env.assert_called_once_with() - update.assert_called_once_with({'k1': 'v1'}, file_cfg) - - -class OpenstackClientTests(unittest.TestCase): - - def test__merge_custom_configuration_no_custom_cfg(self): - cfg = {'k1': 'v1'} - new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy") - self.assertEqual(cfg, new) - - def test__merge_custom_configuration_client_present(self): - cfg = { - 'k1': 'v1', - 'k2': 'v2', - 'custom_configuration': { - 'dummy': { - 'k2': 'u2', - 'k3': 'u3' - } - } - } - result = { - 'k1': 'v1', - 'k2': 'u2', - 'k3': 'u3' - } - bak = cfg.copy() - new = common.OpenStackClient._merge_custom_configuration(cfg, "dummy") - self.assertEqual(result, new) - self.assertEqual(cfg, bak) - - def test__merge_custom_configuration_client_missing(self): - cfg = { - 'k1': 'v1', - 'k2': 'v2', - 'custom_configuration': { - 'dummy': { - 'k2': 'u2', - 'k3': 'u3' - } - } - } - result = { - 'k1': 'v1', - 'k2': 'v2' - } - bak = cfg.copy() - new = common.OpenStackClient._merge_custom_configuration(cfg, "baddy") - self.assertEqual(result, new) - self.assertEqual(cfg, bak) - - def test__merge_custom_configuration_multi_client(self): - cfg = { - 'k1': 'v1', - 'k2': 'v2', - 'custom_configuration': { - 'dummy': { - 'k2': 'u2', - 'k3': 'u3' - }, - 'bummy': { - 'k1': 'z1' - } - } - } - result = { - 'k1': 'z1', - 'k2': 'v2', - } - bak = cfg.copy() - new = common.OpenStackClient._merge_custom_configuration(cfg, "bummy") - self.assertEqual(result, new) - self.assertEqual(cfg, bak) - - @mock.patch.object(common, 'ctx') - def test__merge_custom_configuration_nova_url(self, mock_ctx): - cfg = { - 'nova_url': 'gopher://nova', - } - bak = cfg.copy() - - self.assertEqual( - common.OpenStackClient._merge_custom_configuration( - cfg, 'nova_client'), - {'endpoint_override': 'gopher://nova'}, - ) - self.assertEqual( - common.OpenStackClient._merge_custom_configuration( - cfg, 'dummy'), - {}, - ) - self.assertEqual(cfg, bak) - mock_ctx.logger.warn.assert_has_calls([ - mock.call( - "'nova_url' property is deprecated. Use `custom_configuration." - "nova_client.endpoint_override` instead."), - mock.call( - "'nova_url' property is deprecated. Use `custom_configuration." 
- "nova_client.endpoint_override` instead."), - ]) - - @mock.patch('keystoneauth1.session.Session') - def test___init___multi_region(self, m_session): - mock_client_class = mock.MagicMock() - - cfg = { - 'auth_url': 'test-auth_url/v3', - 'region': 'test-region', - } - - with mock.patch.object( - builtins, 'open', - mock.mock_open( - read_data=""" - { - "region": "region from file", - "other": "this one should get through" - } - """ - ), - create=True, - ): - common.OpenStackClient('fred', mock_client_class, cfg) - - mock_client_class.assert_called_once_with( - region_name='test-region', - other='this one should get through', - session=m_session.return_value, - ) - - def test__validate_auth_params_missing(self): - with self.assertRaises(NonRecoverableError): - common.OpenStackClient._validate_auth_params({}) - - def test__validate_auth_params_too_much(self): - with self.assertRaises(NonRecoverableError): - common.OpenStackClient._validate_auth_params({ - 'auth_url': 'url', - 'password': 'pass', - 'username': 'user', - 'tenant_name': 'tenant', - 'project_id': 'project_test', - }) - - def test__validate_auth_params_v2(self): - common.OpenStackClient._validate_auth_params({ - 'auth_url': 'url', - 'password': 'pass', - 'username': 'user', - 'tenant_name': 'tenant', - }) - - def test__validate_auth_params_v3(self): - common.OpenStackClient._validate_auth_params({ - 'auth_url': 'url', - 'password': 'pass', - 'username': 'user', - 'project_id': 'project_test', - 'user_domain_name': 'user_domain', - }) - - def test__validate_auth_params_v3_mod(self): - common.OpenStackClient._validate_auth_params({ - 'auth_url': 'url', - 'password': 'pass', - 'username': 'user', - 'user_domain_name': 'user_domain', - 'project_name': 'project_test_name', - 'project_domain_name': 'project_domain', - }) - - def test__validate_auth_params_skip_insecure(self): - common.OpenStackClient._validate_auth_params({ - 'auth_url': 'url', - 'password': 'pass', - 'username': 'user', - 'user_domain_name': 'user_domain', - 'project_name': 'project_test_name', - 'project_domain_name': 'project_domain', - 'insecure': True - }) - - def test__split_config(self): - auth = {'auth_url': 'url', 'password': 'pass'} - misc = {'misc1': 'val1', 'misc2': 'val2'} - all = dict(auth) - all.update(misc) - - a, m = common.OpenStackClient._split_config(all) - - self.assertEqual(auth, a) - self.assertEqual(misc, m) - - @mock.patch.object(common, 'loading') - @mock.patch.object(common, 'session') - def test__authenticate_secure(self, mock_session, mock_loading): - auth_params = {'k1': 'v1'} - common.OpenStackClient._authenticate(auth_params) - loader = mock_loading.get_plugin_loader.return_value - loader.load_from_options.assert_called_once_with(k1='v1') - auth = loader.load_from_options.return_value - mock_session.Session.assert_called_once_with(auth=auth, verify=True) - - @mock.patch.object(common, 'loading') - @mock.patch.object(common, 'session') - def test__authenticate_secure_explicit(self, mock_session, mock_loading): - auth_params = {'k1': 'v1', 'insecure': False} - common.OpenStackClient._authenticate(auth_params) - loader = mock_loading.get_plugin_loader.return_value - loader.load_from_options.assert_called_once_with(k1='v1') - auth = loader.load_from_options.return_value - mock_session.Session.assert_called_once_with(auth=auth, verify=True) - - @mock.patch.object(common, 'loading') - @mock.patch.object(common, 'session') - def test__authenticate_insecure(self, mock_session, mock_loading): - auth_params = {'k1': 'v1', 'insecure': True} - 
common.OpenStackClient._authenticate(auth_params) - loader = mock_loading.get_plugin_loader.return_value - loader.load_from_options.assert_called_once_with(k1='v1') - auth = loader.load_from_options.return_value - mock_session.Session.assert_called_once_with(auth=auth, verify=False) - - @mock.patch.object(common, 'loading') - @mock.patch.object(common, 'session') - def test__authenticate_secure_misc(self, mock_session, mock_loading): - params = {'k1': 'v1'} - tests = ('', 'a', [], {}, set(), 4, 0, -1, 3.14, 0.0, None) - for test in tests: - auth_params = params.copy() - auth_params['insecure'] = test - - common.OpenStackClient._authenticate(auth_params) - loader = mock_loading.get_plugin_loader.return_value - loader.load_from_options.assert_called_with(**params) - auth = loader.load_from_options.return_value - mock_session.Session.assert_called_with(auth=auth, verify=True) - - @mock.patch.object(common, 'cinder_client') - def test_cinder_client_get_name_from_resource(self, cc_mock): - ccws = common.CinderClientWithSugar() - mock_volume = mock.Mock() - - self.assertIs( - mock_volume.name, - ccws.get_name_from_resource(mock_volume)) - - -class ClientsConfigTest(unittest.TestCase): - - def setUp(self): - file = tempfile.NamedTemporaryFile(delete=False) - json.dump(self.get_file_cfg(), file) - file.close() - self.addCleanup(os.unlink, file.name) - - env_cfg = self.get_env_cfg() - env_cfg[common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR] = file.name - mock.patch.dict('os.environ', env_cfg, clear=True).start() - - self.loading = mock.patch.object(common, 'loading').start() - self.session = mock.patch.object(common, 'session').start() - self.nova = mock.patch.object(common, 'nova_client').start() - self.neutron = mock.patch.object(common, 'neutron_client').start() - self.cinder = mock.patch.object(common, 'cinder_client').start() - self.addCleanup(mock.patch.stopall) - - self.loader = self.loading.get_plugin_loader.return_value - self.auth = self.loader.load_from_options.return_value - - -class CustomConfigFromInputs(ClientsConfigTest): - - def get_file_cfg(self): - return { - 'username': 'file-username', - 'password': 'file-password', - 'tenant_name': 'file-tenant-name', - 'custom_configuration': { - 'nova_client': { - 'username': 'custom-username', - 'password': 'custom-password', - 'tenant_name': 'custom-tenant-name' - }, - } - } - - def get_inputs_cfg(self): - return { - 'auth_url': 'envar-auth-url', - 'username': 'inputs-username', - 'custom_configuration': { - 'neutron_client': { - 'password': 'inputs-custom-password' - }, - 'cinder_client': { - 'password': 'inputs-custom-password', - 'auth_url': 'inputs-custom-auth-url', - 'extra_key': 'extra-value' - }, - } - } - - def get_env_cfg(self): - return { - 'OS_USERNAME': 'envar-username', - 'OS_PASSWORD': 'envar-password', - 'OS_TENANT_NAME': 'envar-tenant-name', - 'OS_AUTH_URL': 'envar-auth-url', - common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: file.name - } - - def test_nova(self): - common.NovaClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - username='inputs-username', - password='file-password', - tenant_name='file-tenant-name', - auth_url='envar-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.nova.Client.assert_called_once_with( - '2', session=self.session.Session.return_value) - - def test_neutron(self): - common.NeutronClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - 
username='inputs-username', - password='inputs-custom-password', - tenant_name='file-tenant-name', - auth_url='envar-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.neutron.Client.assert_called_once_with( - session=self.session.Session.return_value) - - def test_cinder(self): - common.CinderClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - username='inputs-username', - password='inputs-custom-password', - tenant_name='file-tenant-name', - auth_url='inputs-custom-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.cinder.Client.assert_called_once_with( - '2', session=self.session.Session.return_value, - extra_key='extra-value') - - -class CustomConfigFromFile(ClientsConfigTest): - - def get_file_cfg(self): - return { - 'username': 'file-username', - 'password': 'file-password', - 'tenant_name': 'file-tenant-name', - 'custom_configuration': { - 'nova_client': { - 'username': 'custom-username', - 'password': 'custom-password', - 'tenant_name': 'custom-tenant-name' - }, - } - } - - def get_inputs_cfg(self): - return { - 'auth_url': 'envar-auth-url', - 'username': 'inputs-username', - } - - def get_env_cfg(self): - return { - 'OS_USERNAME': 'envar-username', - 'OS_PASSWORD': 'envar-password', - 'OS_TENANT_NAME': 'envar-tenant-name', - 'OS_AUTH_URL': 'envar-auth-url', - common.Config.OPENSTACK_CONFIG_PATH_ENV_VAR: file.name - } - - def test_nova(self): - common.NovaClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - username='custom-username', - password='custom-password', - tenant_name='custom-tenant-name', - auth_url='envar-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.nova.Client.assert_called_once_with( - '2', session=self.session.Session.return_value) - - def test_neutron(self): - common.NeutronClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - username='inputs-username', - password='file-password', - tenant_name='file-tenant-name', - auth_url='envar-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.neutron.Client.assert_called_once_with( - session=self.session.Session.return_value) - - def test_cinder(self): - common.CinderClientWithSugar(config=self.get_inputs_cfg()) - self.loader.load_from_options.assert_called_once_with( - username='inputs-username', - password='file-password', - tenant_name='file-tenant-name', - auth_url='envar-auth-url' - ) - self.session.Session.assert_called_with(auth=self.auth, verify=True) - self.cinder.Client.assert_called_once_with( - '2', session=self.session.Session.return_value) - - -class PutClientInKwTests(unittest.TestCase): - - def test_override_prop_empty_ctx(self): - props = {} - ctx = MockCloudifyContext(node_id='a20846', properties=props) - kwargs = { - 'ctx': ctx, - 'openstack_config': { - 'p1': 'v1' - } - } - expected_cfg = kwargs['openstack_config'] - - client_class = mock.MagicMock() - common._handle_kw('mock_client', client_class, kwargs) - client_class.assert_called_once_with(config=expected_cfg) - - def test_override_prop_nonempty_ctx(self): - props = { - 'openstack_config': { - 'p1': 'u1', - 'p2': 'u2' - } - } - props_copy = props.copy() - ctx = MockCloudifyContext(node_id='a20846', properties=props) - kwargs = { - 'ctx': ctx, - 'openstack_config': { - 'p1': 'v1', - 'p3': 'v3' - } - } - expected_cfg = { - 'p1': 
'v1', - 'p2': 'u2', - 'p3': 'v3' - } - - client_class = mock.MagicMock() - common._handle_kw('mock_client', client_class, kwargs) - client_class.assert_called_once_with(config=expected_cfg) - # Making sure that _handle_kw will not modify - # 'openstack_config' property of a node. - self.assertEqual(props_copy, ctx.node.properties) - - def test_override_runtime_prop(self): - props = { - 'openstack_config': { - 'p1': 'u1', - 'p2': 'u2' - } - } - runtime_props = { - 'openstack_config': { - 'p1': 'u3' - } - } - props_copy = props.copy() - runtime_props_copy = runtime_props.copy() - ctx = MockCloudifyContext(node_id='a20847', properties=props, - runtime_properties=runtime_props) - kwargs = { - 'ctx': ctx - } - expected_cfg = { - 'p1': 'u3', - 'p2': 'u2' - } - client_class = mock.MagicMock() - common._handle_kw('mock_client', client_class, kwargs) - client_class.assert_called_once_with(config=expected_cfg) - self.assertEqual(props_copy, ctx.node.properties) - self.assertEqual(runtime_props_copy, ctx.instance.runtime_properties) - - def test_resource_id_runtime_prop(self): - props = { - 'openstack_config': { - 'p1': 'u1', - 'p2': 'u2' - } - } - runtime_props = { - 'openstack_config': { - 'p1': 'u3' - } - } - props_copy = props.copy() - runtime_props_copy = runtime_props.copy() - ctx = MockCloudifyContext(node_id='a20847', properties=props, - runtime_properties=runtime_props) - kwargs = { - 'ctx': ctx, - 'resource_id': 'test-id' - } - runtime_props_copy.update( - {common.OPENSTACK_ID_PROPERTY: 'test-id'}) - expected_cfg = { - 'p1': 'u3', - 'p2': 'u2' - } - client_class = mock.MagicMock() - common._handle_kw('mock_client', client_class, kwargs) - client_class.assert_called_once_with(config=expected_cfg) - self.assertEqual(props_copy, ctx.node.properties) - self.assertEqual(runtime_props_copy, ctx.instance.runtime_properties) - - -class ResourceQuotaTests(unittest.TestCase): - - def _test_quota_validation(self, amount, quota, failure_expected): - ctx = MockCloudifyContext(node_id='node_id', properties={}) - client = mock.MagicMock() - - def mock_cosmo_list(_): - return [x for x in range(0, amount)] - client.cosmo_list = mock_cosmo_list - - def mock_get_quota(_): - return quota - client.get_quota = mock_get_quota - - if failure_expected: - self.assertRaisesRegexp( - NonRecoverableError, - 'cannot be created due to quota limitations', - common.validate_resource, - ctx=ctx, sugared_client=client, - openstack_type='openstack_type') - else: - common.validate_resource( - ctx=ctx, sugared_client=client, - openstack_type='openstack_type') - - def test_equals_quotas(self): - self._test_quota_validation(3, 3, True) - - def test_exceeded_quota(self): - self._test_quota_validation(5, 3, True) - - def test_infinite_quota(self): - self._test_quota_validation(5, -1, False) - - -class UseExternalResourceTests(unittest.TestCase): - - def _test_use_external_resource(self, - is_external, - create_if_missing, - exists): - properties = {'create_if_missing': create_if_missing, - 'use_external_resource': is_external, - 'resource_id': 'resource_id'} - client_mock = mock.MagicMock() - os_type = 'test' - - def _raise_error(*_): - raise NonRecoverableError('Error') - - def _return_something(*_): - return mock.MagicMock() - - return_value = _return_something if exists else _raise_error - if exists: - properties.update({'resource_id': 'rid'}) - - node_context = MockCloudifyContext(node_id='a20847', - properties=properties) - with mock.patch( - 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx', - new=return_value): 
- return common.use_external_resource(node_context, - client_mock, os_type) - - def test_use_existing_resource(self): - self.assertIsNotNone(self._test_use_external_resource(True, True, - True)) - self.assertIsNotNone(self._test_use_external_resource(True, False, - True)) - - def test_create_resource(self): - self.assertIsNone(self._test_use_external_resource(False, True, False)) - self.assertIsNone(self._test_use_external_resource(False, False, - False)) - self.assertIsNone(self._test_use_external_resource(True, True, False)) - - def test_raise_error(self): - # If exists and shouldn't it is checked in resource - # validation so below scenario is not tested here - self.assertRaises(NonRecoverableError, - self._test_use_external_resource, - is_external=True, - create_if_missing=False, - exists=False) - - -class ValidateResourceTests(unittest.TestCase): - - def _test_validate_resource(self, - is_external, - create_if_missing, - exists, - client_mock_provided=None): - properties = {'create_if_missing': create_if_missing, - 'use_external_resource': is_external, - 'resource_id': 'resource_id'} - client_mock = client_mock_provided or mock.MagicMock() - os_type = 'test' - - def _raise_error(*_): - raise NonRecoverableError('Error') - - def _return_something(*_): - return mock.MagicMock() - return_value = _return_something if exists else _raise_error - if exists: - properties.update({'resource_id': 'rid'}) - - node_context = MockCloudifyContext(node_id='a20847', - properties=properties) - with mock.patch( - 'openstack_plugin_common._get_resource_by_name_or_id_from_ctx', - new=return_value): - return common.validate_resource(node_context, client_mock, os_type) - - def test_use_existing_resource(self): - self._test_validate_resource(True, True, True) - self._test_validate_resource(True, False, True) - - def test_create_resource(self): - client_mock = mock.MagicMock() - client_mock.cosmo_list.return_value = ['a', 'b', 'c'] - client_mock.get_quota.return_value = 5 - self._test_validate_resource(False, True, False, client_mock) - self._test_validate_resource(False, False, False, client_mock) - self._test_validate_resource(True, True, False, client_mock) - - def test_raise_error(self): - # If exists and shouldn't it is checked in resource - # validation so below scenario is not tested here - self.assertRaises(NonRecoverableError, - self._test_validate_resource, - is_external=True, - create_if_missing=False, - exists=False) - - def test_raise_quota_error(self): - client_mock = mock.MagicMock() - client_mock.cosmo_list.return_value = ['a', 'b', 'c'] - client_mock.get_quota.return_value = 3 - self.assertRaises(NonRecoverableError, - self._test_validate_resource, - is_external=True, - create_if_missing=True, - exists=False, - client_mock_provided=client_mock) diff --git a/openstack_plugin_common/tests/provider-context.json b/openstack_plugin_common/tests/provider-context.json deleted file mode 100644 index f7e20e4e..00000000 --- a/openstack_plugin_common/tests/provider-context.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "context": { - "resources": { - "management_keypair": { - "name": "p2_cloudify-manager-kp-ilya", - "id": "p2_cloudify-manager-kp-ilya", - "type": "keypair", - "external_resource": true - }, - "router": { - "name": "p2_cloudify-router", - "id": "856f9fb8-6676-4b99-b64d-b76874b30abf", - "type": "router", - "external_resource": true - }, - "subnet": { - "name": "p2_cloudify-admin-network-subnet", - "id": "dd193491-d728-4e3e-8199-27eec0ba18e4", - "type": "subnet", - "external_resource": true - }, - 
"int_network": { - "name": "p2_cloudify-admin-network", - "id": "27ef2770-5219-4bb1-81d4-14ed450c5181", - "type": "network", - "external_resource": true - }, - "management_server": { - "name": "p2_cfy-mgr-ilya-2014-06-01-11:59", - "id": "be9991da-9c34-4f7c-9c33-5e04ad2d5b3e", - "type": "server", - "external_resource": false - }, - "agents_security_group": { - "name": "p2_cloudify-sg-agents", - "id": "d52280aa-0e79-4697-bd08-baf3f84e2a10", - "type": "neutron security group", - "external_resource": true - }, - "agents_keypair": { - "name": "p2_cloudify-agents-kp-ilya", - "id": "p2_cloudify-agents-kp-ilya", - "type": "keypair", - "external_resource": true - }, - "management_security_group": { - "name": "p2_cloudify-sg-management", - "id": "5862e0d2-8f28-472e-936b-d2da9cb935b3", - "type": "neutron security group", - "external_resource": true - }, - "floating_ip": { - "external_resource": true, - "id": "None", - "type": "floating ip", - "ip": "CENSORED" - }, - "ext_network": { - "name": "Ext-Net", - "id": "7da74520-9d5e-427b-a508-213c84e69616", - "type": "network", - "external_resource": true - } - }, - "cloudify": { - "resources_prefix": "p2_", - "cloudify_agent": { - "user": "ubuntu", - "agent_key_path": "/PATH/CENSORED/p2_cloudify-agents-kp-ilya.pem", - "min_workers": 2, - "max_workers": 5, - "remote_execution_port": 22 - } - } - }, - "name": "cloudify_openstack" -} - diff --git a/openstack_plugin_common/tests/test.py b/openstack_plugin_common/tests/test.py deleted file mode 100644 index 13099292..00000000 --- a/openstack_plugin_common/tests/test.py +++ /dev/null @@ -1,40 +0,0 @@ -import json -import os - -from cloudify.context import BootstrapContext - -from cloudify.mocks import MockCloudifyContext - - -RETRY_AFTER = 1 -# Time during which no retry could possibly happen. -NO_POSSIBLE_RETRY_TIME = RETRY_AFTER / 2.0 - -BOOTSTRAP_CONTEXTS_WITHOUT_PREFIX = ( - { - }, - { - 'resources_prefix': '' - }, - { - 'resources_prefix': None - }, -) - - -def set_mock_provider_context(ctx, provider_context): - - def mock_provider_context(provider_name_unused): - return provider_context - - ctx.get_provider_context = mock_provider_context - - -def create_mock_ctx_with_provider_info(*args, **kw): - cur_dir = os.path.dirname(os.path.realpath(__file__)) - full_file_name = os.path.join(cur_dir, 'provider-context.json') - with open(full_file_name) as f: - provider_context = json.loads(f.read())['context'] - kw['provider_context'] = provider_context - kw['bootstrap_context'] = BootstrapContext(provider_context['cloudify']) - return MockCloudifyContext(*args, **kw) diff --git a/openstack_sdk/__init__.py b/openstack_sdk/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/common.py b/openstack_sdk/common.py new file mode 100644 index 00000000..fa7b3707 --- /dev/null +++ b/openstack_sdk/common.py @@ -0,0 +1,94 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import uuid + +# Third party imports +import openstack + + +class QuotaException(Exception): + pass + + +class OpenstackResource(object): + service_type = None + resource_type = None + + def __init__(self, client_config, resource_config=None, logger=None): + self.client_config = client_config + self.connection = openstack.connect(**client_config) + self.config = resource_config or {} + self.name = self.config.get('name') + self.resource_id =\ + None if 'id' not in self.config else self.config['id'] + self.logger = logger + + def __str__(self): + return self.name if not self.resource_id else self.resource_id + + def validate_resource_identifier(self): + """ + This method will validate the resource identifier whenever the + "use_external_resource" set "True", so it will check if resource + "id" or "name" contains a valid value before start any operation + :return: error_message in case the resource identifier is invalid + """ + error_message = None + if not (self.name or self.resource_id): + error_message = 'Resource id & name cannot be both empty' + + if self.resource_id: + try: + uuid.UUID(self.resource_id) + except ValueError: + # If it's a value error, then the string + # is not a valid hex code for a UUID. + error_message = 'Invalid resource id: {0}' \ + ''.format(self.resource_id) + + elif self.name and not isinstance(self.name, basestring): + error_message = 'Invalid resource name: {0} ' \ + 'this should be a string'.format(self.name) + + return error_message + + def get_quota_sets(self, quota_type): + project_name = self.client_config.get('project_name') + quota = getattr( + self.connection, + 'get_{0}_quotas'.format(self.service_type))(project_name) + + if not quota: + raise QuotaException( + 'Invalid {0} quota response'.format(self.service_type)) + + return getattr(quota, quota_type) + + def resource_plural(self, openstack_type): + return '{0}s'.format(openstack_type) + + def list(self): + raise NotImplementedError() + + def get(self): + raise NotImplementedError() + + def create(self): + raise NotImplementedError() + + def delete(self): + raise NotImplementedError() diff --git a/openstack_sdk/resources/__init__.py b/openstack_sdk/resources/__init__.py new file mode 100644 index 00000000..e05ea898 --- /dev/null +++ b/openstack_sdk/resources/__init__.py @@ -0,0 +1,23 @@ + +# Monkey patch the "get_server_password" because the the current method for +# openstacksdk https://bit.ly/2zxA3At assume there is an instance variable +# called "_session" and it failed with error that "Proxy" class does not +# have such variable + +from openstack.compute.v2 import _proxy as custom_proxy +from openstack.compute.v2 import server as _server + + +def get_server_password(self, server): + """Get the administrator password + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + + :returns: encrypted password. + """ + server = self._get_resource(_server.Server, server) + return server.get_password(self) + + +custom_proxy.Proxy.get_server_password = get_server_password diff --git a/openstack_sdk/resources/compute.py b/openstack_sdk/resources/compute.py new file mode 100644 index 00000000..e69ae973 --- /dev/null +++ b/openstack_sdk/resources/compute.py @@ -0,0 +1,447 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on this documentation: +# https://docs.openstack.org/openstacksdk/latest/user/proxies/compute.html. + +# Local imports +from openstack_sdk.common import OpenstackResource + + +class OpenstackServer(OpenstackResource): + service_type = 'compute' + resource_type = 'server' + + def list(self, details=True, all_projects=False, query=None): + query = query or {} + self.logger.debug('Attempting to list servers') + return self.connection.compute.servers(details, all_projects, **query) + + def get(self): + self.logger.debug( + 'Attempting to find this server: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + server = self.connection.compute.get_server( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found server with this result: {0}'.format(server)) + return server + + def create(self): + self.logger.debug( + 'Attempting to create server with these args: {0}'.format( + self.config)) + server = self.connection.compute.create_server(**self.config) + self.logger.info( + 'Created server with this result: {0}'.format(server)) + return server + + def delete(self): + server = self.get() + self.logger.debug( + 'Attempting to delete this server: {0}'.format(server)) + result = self.connection.compute.delete_server(server) + self.logger.debug( + 'Deleted server with this result: {0}'.format(result)) + return result + + def reboot(self, reboot_type): + server = self.get() + self.logger.debug( + 'Attempting to reboot this server: {0}'.format(server)) + self.connection.compute.reboot_server(server, reboot_type) + + def resume(self): + server = self.get() + self.logger.debug( + 'Attempting to resume this server: {0}'.format(server)) + self.connection.compute.resume_server(server) + + def suspend(self): + server = self.get() + self.logger.debug( + 'Attempting to suspend this server: {0}'.format(server)) + self.connection.compute.suspend_server(server) + + def backup(self, name, backup_type, rotation): + server = self.get() + self.logger.debug( + 'Attempting to backup this server: {0}'.format(server)) + self.connection.compute.backup_server(server, + name, + backup_type, + rotation) + + def rebuild(self, image, name=None, admin_password='', **attr): + server = self.get() + name = name or server.name + attr['image'] = image + self.logger.debug( + 'Attempting to rebuild this server: {0}'.format(server)) + + self.connection.compute.rebuild_server(server, + name, + admin_password, + **attr) + + def create_image(self, name, metadata=None): + server = self.get() + self.logger.debug( + 'Attempting to create image for this server: {0}'.format(server)) + self.connection.compute.create_server_image( + server, name, metadata=metadata + ) + + def update(self, new_config=None): + server = self.get() + self.logger.debug( + 'Attempting to update this server: {0} with args {1}'.format( + server, new_config)) + result = self.connection.compute.update_server(server, **new_config) + self.logger.debug( + 'Updated server with this result: {0}'.format(result)) + return result + + def start(self): + server = self.get() + self.logger.debug( + 'Attempting 
to start this server: {0}'.format(server)) + self.connection.compute.start_server(server) + + def stop(self): + server = self.get() + self.logger.debug( + 'Attempting to stop this server: {0}'.format(server)) + self.connection.compute.stop_server(server) + + def get_server_password(self): + server = self.get() + self.logger.debug( + 'Attempting to get server' + ' password for this server: {0}'.format(server)) + return self.connection.compute.get_server_password(server) + + def list_volume_attachments(self, query=None): + query = query or {} + self.logger.debug('Attempting to list volumes attachments') + return self.connection.compute.volume_attachments(**query) + + def get_volume_attachment(self, attachment_id): + self.logger.debug( + 'Attempting to find this volume attachment: {0}' + ''.format(attachment_id)) + volume_attachment = \ + self.connection.compute.get_volume_attachment( + attachment_id, self.resource_id) + self.logger.debug( + 'Found volume attachment with this result: {0}' + ''.format(volume_attachment)) + return volume_attachment + + def create_volume_attachment(self, attachment_config): + self.logger.debug( + 'Attempting to create volume attachment' + ' with these args: {0}'.format(self.config)) + volume_attachment = \ + self.connection.compute.create_volume_attachment( + self.resource_id, **attachment_config) + self.logger.debug( + 'Created volume attachment with this result: {0}' + ''.format(volume_attachment)) + return volume_attachment + + def delete_volume_attachment(self, attachment_id): + self.logger.debug( + 'Attempting to delete this volume attachment: {0}' + ''.format(attachment_id)) + self.connection.compute.delete_volume_attachment(attachment_id, + self.resource_id) + self.logger.debug( + 'Volume attachment {0} was deleted successfully' + ''.format(attachment_id)) + + def create_server_interface(self, interface_config): + self.logger.debug( + 'Attempting to create server interface with these args:' + '{0}'.format(interface_config)) + result = \ + self.connection.compute.create_server_interface( + self.resource_id, **interface_config) + self.logger.debug( + 'Created server interface with this result: {0}'.format(result)) + return result + + def delete_server_interface(self, interface_id): + self.logger.debug( + 'Attempting to delete server interface with these args:' + '{0}'.format(interface_id)) + self.connection.compute.delete_server_interface( + interface_id, server=self.resource_id) + self.logger.debug( + 'Server interface {0} was deleted successfully' + ''.format(interface_id)) + + def get_server_interface(self, interface_id): + self.logger.debug( + 'Attempting to find this server interface: {0}' + ''.format(interface_id)) + server_interface = \ + self.connection.compute.get_server_interface( + interface_id, self.resource_id) + self.logger.debug( + 'Found server interface with this result: {0}' + ''.format(server_interface)) + return server_interface + + def server_interfaces(self): + self.logger.debug('Attempting to list server interfaces') + return self.connection.compute.server_interfaces(self.resource_id) + + def add_security_group_to_server(self, security_group_id): + self.logger.debug( + 'Attempting to add security group {0} to server {1}' + ''.format(security_group_id, self.resource_id)) + self.connection.compute.add_security_group_to_server( + self.resource_id, security_group_id) + self.logger.debug( + 'Security group {0} was added to server {1} ' + 'successfully'.format(security_group_id, self.resource_id)) + + def 
remove_security_group_from_server(self, security_group_id): + self.logger.debug( + 'Attempting to remove security group {0} from server {1}' + ''.format(security_group_id, self.resource_id)) + self.connection.compute.remove_security_group_from_server( + self.resource_id, security_group_id) + self.logger.debug( + 'Security group {0} was removed from server {1} ' + 'successfully'.format(security_group_id, self.resource_id)) + + def add_floating_ip_to_server(self, floating_ip, fixed_ip=None): + self.logger.debug( + 'Attempting to add floating ip {0} to server {1}' + ''.format(floating_ip, self.resource_id)) + self.connection.compute.add_floating_ip_to_server( + self.resource_id, floating_ip, fixed_address=fixed_ip) + self.logger.debug( + 'Floating ip {0} was added to server {1} successfully' + ''.format(floating_ip, self.resource_id)) + + def remove_floating_ip_from_server(self, floating_ip): + self.logger.debug( + 'Attempting to remove floating ip {0} from server {1}' + ''.format(floating_ip, self.resource_id)) + self.connection.compute.remove_floating_ip_from_server( + self.resource_id, floating_ip) + self.logger.debug( + 'Floating ip {0} was removed from server {1} ' + 'successfully'.format(floating_ip, self.resource_id)) + + +class OpenstackHostAggregate(OpenstackResource): + service_type = 'compute' + resource_type = 'aggregate' + + def list(self): + self.logger.debug('Attempting to list aggregates') + return self.connection.compute.aggregates() + + def get(self): + self.logger.debug( + 'Attempting to find this aggregate: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + aggregate = self.connection.compute.get_aggregate( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found aggregate with this result: {0}'.format(aggregate)) + return aggregate + + def create(self): + self.logger.debug( + 'Attempting to create aggregate with these args: {0}'.format( + self.config)) + aggregate = self.connection.compute.create_aggregate(**self.config) + self.logger.debug( + 'Created aggregate with this result: {0}'.format(aggregate)) + return aggregate + + def update(self, new_config=None): + aggregate = self.get() + self.logger.debug( + 'Attempting to update this aggregate: {0} with args {1}'.format( + aggregate, new_config)) + result =\ + self.connection.compute.update_aggregate(aggregate, **new_config) + self.logger.debug( + 'Updated aggregate with this result: {0}'.format(result)) + return result + + def delete(self): + aggregate = self.get() + self.logger.debug( + 'Attempting to delete this aggregate: {0}'.format(aggregate)) + result = self.connection.compute.delete_aggregate(aggregate) + self.logger.debug( + 'Deleted aggregate with this result: {0}'.format(result)) + return result + + def set_metadata(self, metadata): + aggregate = self.get() + self.logger.debug( + 'Attempting to set metadata to this aggregate: {0}' + ''.format(aggregate)) + result = \ + self.connection.compute.set_aggregate_metadata(aggregate, metadata) + self.logger.debug( + 'Set metadata to aggregate with this result: {0}'.format( + result)) + return result + + def add_host(self, host): + aggregate = self.get() + self.logger.debug( + 'Attempting to add host to this aggregate: {0}' + ''.format(aggregate)) + result = self.connection.compute.add_host_to_aggregate(aggregate, host) + self.logger.debug( + 'Added host to aggregate with this result: {0}'.format(result)) + return result + + def remove_host(self, host): + aggregate = self.get() + self.logger.debug( + 'Attempting 
to delete this aggregate: {0}'.format(aggregate)) + result = \ + self.connection.compute.remove_host_from_aggregate(aggregate, host) + self.logger.debug( + 'Deleted host to aggregate with this result: {0}'.format(result)) + return result + + +class OpenstackServerGroup(OpenstackResource): + service_type = 'compute' + resource_type = 'server_group' + + def list(self, query=None): + query = query or {} + return self.connection.compute.server_groups(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this server group: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + server_group = self.connection.compute.get_server_group( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found server group with this result: {0}'.format(server_group)) + return server_group + + def create(self): + self.logger.debug( + 'Attempting to create server group with these args: {0}'.format( + self.config)) + server_group =\ + self.connection.compute.create_server_group(**self.config) + self.logger.debug( + 'Created server group with this result: {0}'.format(server_group)) + return server_group + + def delete(self): + server_group = self.get() + self.logger.debug( + 'Attempting to delete this server group: {0}'.format(server_group)) + result = self.connection.compute.delete_server_group(server_group) + self.logger.debug( + 'Deleted server group with this result: {0}'.format(result)) + return result + + +class OpenstackKeyPair(OpenstackResource): + service_type = 'compute' + resource_type = 'key_pair' + + def validate_resource_identifier(self): + return None + + def list(self): + return self.connection.compute.keypairs() + + def get(self): + name = self.name if not self.resource_id else self.resource_id + self.logger.debug( + 'Attempting to find this key pair: {0}'.format(name)) + key_pair = self.connection.compute.get_keypair(name) + self.logger.debug( + 'Found key pair with this result: {0}'.format(key_pair)) + return key_pair + + def create(self): + self.logger.debug( + 'Attempting to create key pair with these args: {0}'.format( + self.config)) + key_pair = self.connection.compute.create_keypair(**self.config) + self.logger.debug( + 'Created key pair with this result: {0}'.format(key_pair)) + return key_pair + + def delete(self): + key_pair = self.get() + self.logger.debug( + 'Attempting to delete this key pair: {0}'.format(key_pair)) + result = self.connection.compute.delete_keypair(key_pair) + self.logger.debug( + 'Deleted key pair with this result: {0}'.format(result)) + return result + + +class OpenstackFlavor(OpenstackResource): + service_type = 'compute' + resource_type = 'flavor' + + def list(self, details=True, query=None): + query = query or {} + return self.connection.compute.flavors(details, **query) + + def get(self): + self.logger.debug( + 'Attempting to find this flavor: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + flavor = self.connection.compute.get_flavor( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found flavor with this result: {0}'.format(flavor)) + return flavor + + def create(self): + self.logger.debug( + 'Attempting to create flavor with these args: {0}'.format( + self.config)) + flavor = self.connection.compute.create_flavor(**self.config) + self.logger.debug( + 'Created flavor image with this result: {0}'.format(flavor)) + return flavor + + def delete(self): + flavor = self.get() + self.logger.debug( + 'Attempting to delete this flavor: 
{0}'.format(flavor)) + result = self.connection.compute.delete_flavor(flavor) + self.logger.debug( + 'Deleted flavor with this result: {0}'.format(result)) + return result diff --git a/openstack_sdk/resources/identity.py b/openstack_sdk/resources/identity.py new file mode 100644 index 00000000..db211d89 --- /dev/null +++ b/openstack_sdk/resources/identity.py @@ -0,0 +1,189 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on this documentation: +# https://docs.openstack.org/openstacksdk/latest/user/proxies/compute.html. + +# Local imports +from openstack_sdk.common import OpenstackResource + + +class OpenstackUser(OpenstackResource): + service_type = 'identity' + resource_type = 'user' + + def list(self, query=None): + query = query or {} + return self.connection.identity.users(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this user: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + user = self.connection.identity.get_user( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug('Found user with this result: {0}'.format(user)) + return user + + def find_user(self, name_or_id): + self.logger.debug( + 'Attempting to find this user: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + user = self.connection.identity.find_user(name_or_id) + self.logger.debug('Found user with this result: {0}'.format(user)) + return user + + def create(self): + self.logger.debug( + 'Attempting to create user with these args: {0}'.format( + self.config)) + user = self.connection.identity.create_user(**self.config) + self.logger.debug('Created user with this result: {0}'.format(user)) + return user + + def delete(self): + user = self.get() + self.logger.debug('Attempting to delete this user: {0}'.format(user)) + result = self.connection.identity.delete_user(user) + self.logger.debug('Deleted user with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + user = self.get() + self.logger.debug( + 'Attempting to update this user: {0} with args {1}'.format( + user, new_config)) + result = self.connection.identity.update_user(user, **new_config) + self.logger.debug('Updated user with this result: {0}'.format(result)) + return result + + +class OpenstackRole(OpenstackResource): + service_type = 'identity' + resource_type = 'role' + + def list(self, query=None): + query = query or {} + return self.connection.identity.roles(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this role: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + role = self.connection.identity.get_role( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug('Found role with this result: {0}'.format(role)) + return role + + def find_role(self, name_or_id): + self.logger.debug( + 'Attempting to find this role: {0}'.format( + self.name if not self.resource_id else 
self.resource_id)) + role = self.connection.identity.find_role(name_or_id) + self.logger.debug('Found role with this result: {0}'.format(role)) + return role + + def assign_project_role_to_user(self, project_id, user_id, role_id): + params = { + 'project': project_id, + 'user': user_id, + 'role': role_id + } + self.logger.debug( + 'Attempting to assign role to user for this project: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + + self.connection.identity.assign_project_role_to_user(**params) + + def create(self): + self.logger.debug( + 'Attempting to create role with these args: {0}'.format( + self.config)) + role = self.connection.identity.create_role(**self.config) + self.logger.debug('Created role with this result: {0}'.format(role)) + return role + + def delete(self): + role = self.get() + self.logger.debug( + 'Attempting to delete this role: {0}'.format(role)) + result = self.connection.identity.delete_role(role) + self.logger.debug( + 'Deleted role with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + role = self.get() + self.logger.debug( + 'Attempting to update this role: {0} with args {1}'.format( + role, new_config)) + result = self.connection.identity.update_role(role, **new_config) + self.logger.debug( + 'Updated role with this result: {0}'.format(result)) + return result + + +class OpenstackProject(OpenstackResource): + service_type = 'identity' + resource_type = 'project' + infinite_resource_quota = 10 ** 9 + + def list(self, query=None): + query = query or {} + return self.connection.identity.projects(**query) + + def get_quota_sets(self, quota_type=None): + return self.infinite_resource_quota + + def get(self): + self.logger.debug( + 'Attempting to find this project: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + project = self.connection.identity.get_project( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found project with this result: {0}'.format(project)) + return project + + def create(self): + self.logger.debug( + 'Attempting to create project with these args: {0}'.format( + self.config)) + project = self.connection.identity.create_project(**self.config) + self.logger.debug( + 'Created project with this result: {0}'.format(project)) + return project + + def delete(self): + project = self.get() + self.logger.debug( + 'Attempting to delete this project: {0}'.format(project)) + result = self.connection.identity.delete_project(project) + self.logger.debug( + 'Deleted project with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + project = self.get() + self.logger.debug( + 'Attempting to update this project: {0} with args {1}'.format( + project, new_config)) + result = self.connection.identity.update_project(project, **new_config) + self.logger.debug( + 'Updated project with this result: {0}'.format(result)) + return result diff --git a/openstack_sdk/resources/images.py b/openstack_sdk/resources/images.py new file mode 100644 index 00000000..58838b8e --- /dev/null +++ b/openstack_sdk/resources/images.py @@ -0,0 +1,69 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Based on this documentation: +# https://docs.openstack.org/openstacksdk/latest/user/proxies/compute.html. + +# Local imports +from openstack_sdk.common import OpenstackResource + + +class OpenstackImage(OpenstackResource): + service_type = 'compute' + resource_type = 'image' + infinite_resource_quota = 10 ** 9 + + def list(self, query=None): + query = query or {} + return self.connection.image.images(**query) + + def get_quota_sets(self, quota_type=None): + return self.infinite_resource_quota + + def get(self): + self.logger.debug( + 'Attempting to find this image: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + image = self.connection.image.get_image( + self.name if not self.resource_id else self.resource_id + ) + self.logger.debug( + 'Found image with this result: {0}'.format(image)) + return image + + def create(self): + self.logger.debug( + 'Attempting to create image with these args: {0}'.format( + self.config)) + image = self.connection.image.upload_image(**self.config) + self.logger.debug( + 'Created image with this result: {0}'.format(image)) + return image + + def delete(self): + image = self.get() + self.logger.debug( + 'Attempting to delete this image: {0}'.format(image)) + self.connection.image.delete_image(image) + + def update(self, new_config=None): + image = self.get() + self.logger.debug( + 'Attempting to update this image: {0} with args {1}'.format( + image, new_config)) + result = self.connection.image.update_image(image, **new_config) + self.logger.debug( + 'Updated image with this result: {0}'.format(result)) + return result diff --git a/openstack_sdk/resources/networks.py b/openstack_sdk/resources/networks.py new file mode 100644 index 00000000..008632b2 --- /dev/null +++ b/openstack_sdk/resources/networks.py @@ -0,0 +1,467 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Based on this documentation: +# https://docs.openstack.org/openstacksdk/latest/user/proxies/network.html. + +# Local imports +from openstack_sdk.common import OpenstackResource + + +class OpenstackNetwork(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2D2S1xw. 
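    # Like the compute and identity wrappers earlier in this changeset, the
    # network wrappers resolve a resource by its id when one is set and fall
    # back to its name otherwise, and they override resource_plural() to
    # return the type name unchanged instead of appending an 's'.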
+ service_type = 'network' + resource_type = 'network' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.networks(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this network: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + network = self.connection.network.get_network( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found network with this result: {0}'.format(network)) + return network + + def find_network(self): + self.logger.debug( + 'Attempting to find this network: {0}'.format(self.name)) + network = self.connection.network.find_network(self.name) + self.logger.debug( + 'Found network with this result: {0}'.format(network)) + return network + + def create(self): + self.logger.debug( + 'Attempting to create network with these args: {0}'.format( + self.config)) + network = self.connection.network.create_network(**self.config) + self.logger.debug( + 'Created network with this result: {0}'.format(network)) + return network + + def delete(self): + network = self.get() + self.logger.debug( + 'Attempting to delete this network: {0}'.format(network)) + result = self.connection.network.delete_network(network) + self.logger.debug( + 'Deleted network with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + network = self.get() + self.logger.debug( + 'Attempting to update this network: {0} with args {1}'.format( + network, new_config)) + result = self.connection.network.update_network(network, **new_config) + self.logger.debug( + 'Updated network with this result: {0}'.format(result)) + return result + + +class OpenstackSubnet(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2SMLuvY + + service_type = 'network' + resource_type = 'subnet' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.subnets(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this subnet: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + subnet = self.connection.network.get_subnet( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found subnet with this result: {0}'.format(subnet)) + return subnet + + def create(self): + self.logger.debug( + 'Attempting to create subnet with these args: {0}'.format( + self.config)) + subnet = self.connection.network.create_subnet(**self.config) + self.logger.debug( + 'Created subnet with this result: {0}'.format(subnet)) + return subnet + + def delete(self): + subnet = self.get() + self.logger.debug( + 'Attempting to delete this subnet: {0}'.format(subnet)) + result = self.connection.network.delete_subnet(subnet) + self.logger.debug( + 'Deleted subnet with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + subnet = self.get() + self.logger.debug( + 'Attempting to update this subnet: {0} with args {1}'.format( + subnet, new_config)) + result = self.connection.network.update_subnet(subnet, **new_config) + self.logger.debug( + 'Updated subnet with this result: {0}'.format(result)) + return result + + +class OpenstackPort(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2DlPnUj + service_type = 'network' + resource_type = 'port' + + def resource_plural(self, openstack_type): + return openstack_type + + def 
list(self, query=None): + query = query or {} + return self.connection.network.ports(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this port: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + port = self.connection.network.get_port( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found port with this result: {0}'.format(port)) + return port + + def create(self): + self.logger.debug( + 'Attempting to create port with these args: {0}'.format( + self.config)) + port = self.connection.network.create_port(**self.config) + self.logger.debug( + 'Created port with this result: {0}'.format(port)) + return port + + def delete(self): + port = self.get() + self.logger.debug( + 'Attempting to delete this port: {0}'.format(port)) + result = self.connection.network.delete_port(port) + self.logger.debug( + 'Deleted port with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + port = self.get() + self.logger.debug( + 'Attempting to update this port: {0} with args {1}'.format( + port, new_config)) + result = self.connection.network.update_port(port, **new_config) + self.logger.debug( + 'Updated port with this result: {0}'.format(result)) + return result + + +class OpenstackRouter(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2QioQdg + service_type = 'network' + resource_type = 'router' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.routers(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this router: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + router = self.connection.network.get_router( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found router with this result: {0}'.format(router)) + return router + + def create(self): + self.logger.debug( + 'Attempting to create router with these args: {0}'.format( + self.config)) + router = self.connection.network.create_router(**self.config) + self.logger.debug( + 'Created router with this result: {0}'.format(router)) + return router + + def delete(self): + router = self.get() + self.logger.debug( + 'Attempting to delete this router: {0}'.format(router)) + result = self.connection.network.delete_router(router) + self.logger.debug( + 'Deleted router with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + router = self.get() + self.logger.debug( + 'Attempting to update this router: {0} with args {1}'.format( + router, new_config)) + result = self.connection.network.update_router(router, **new_config) + self.logger.debug( + 'Updated router with this result: {0}'.format(result)) + return result + + def add_interface(self, kwargs): + router = self.get() + self.logger.debug( + 'Attempting to add {0} interface this router: {1}'.format( + kwargs, router)) + result = self.connection.network.add_interface_to_router( + router, **kwargs) + self.logger.debug( + 'Added this interface to router: {0}'.format(result)) + return result + + def remove_interface(self, kwargs): + router = self.get() + self.logger.debug( + 'Attempting to remove {0} interface this router: {1}'.format( + kwargs, router)) + result = self.connection.network.remove_interface_from_router( + router, **kwargs) + self.logger.debug( + 'Removed this interface to router: {0}'.format(result)) + return result + + +class 
OpenstackFloatingIP(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2JGHqcQ + service_type = 'network' + resource_type = 'ip' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.ips(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this floating ip: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + floating_ip = self.connection.network.get_ip( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found floating ip with this result: {0}'.format(floating_ip)) + return floating_ip + + def create(self): + self.logger.debug( + 'Attempting to create floating ip with these args: {0}'.format( + self.config)) + floating_ip = self.connection.network.create_ip(**self.config) + self.logger.debug( + 'Created floating ip with this result: {0}'.format(floating_ip)) + return floating_ip + + def delete(self): + floating_ip = self.get() + self.logger.debug( + 'Attempting to delete this floating ip: {0}'.format(floating_ip)) + self.connection.network.delete_ip(floating_ip) + + def update(self, new_config=None): + floating_ip = self.get() + self.logger.debug( + 'Attempting to update this floating ip: {0} with args {1}'.format( + floating_ip, new_config)) + result = self.connection.network.update_ip(floating_ip, **new_config) + self.logger.debug( + 'Updated floating ip with this result: {0}'.format(result)) + return result + + +class OpenstackSecurityGroup(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2PCsWA0 + service_type = 'network' + resource_type = 'security_group' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.security_groups(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this security group: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + security_group = self.connection.network.get_security_group( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found security group with this result: {0}'.format( + security_group)) + return security_group + + def create(self): + self.logger.debug( + 'Attempting to create security group with these args: {0}'.format( + self.config)) + security_group = self.connection.network.create_security_group( + **self.config) + self.logger.debug( + 'Created security group with this result: {0}'.format( + security_group)) + return security_group + + def delete(self): + security_group = self.get() + self.logger.debug( + 'Attempting to delete this security_group: {0}'.format( + security_group)) + result = self.connection.network.delete_security_group(security_group) + self.logger.debug( + 'Deleted security group with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + security_group = self.get() + self.logger.debug('Attempting to update this ' + 'security group: {0} with args {1}'.format( + security_group, new_config)) + result = self.connection.network.update_security_group( + security_group, **new_config) + self.logger.debug( + 'Updated security group with this result: {0}'.format(result)) + return result + + +class OpenstackSecurityGroupRule(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2PCsWA0 + service_type = 'network' + resource_type = 'security_group_rule' + + def resource_plural(self, openstack_type): + return 
openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.security_group_rules(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this security group rule: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + security_group_rule = self.connection.network.get_security_group_rule( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found security group with this result: {0}'.format( + security_group_rule)) + return security_group_rule + + def create(self): + self.logger.debug('Attempting to create security group rule ' + 'with these args: {0}'.format(self.config)) + security_group_rule = \ + self.connection.network.create_security_group_rule(**self.config) + self.logger.debug( + 'Created security group rule with this result: {0}'.format( + security_group_rule)) + return security_group_rule + + def delete(self): + security_group_rule = self.get() + self.logger.debug( + 'Attempting to delete this security group rule: {0}'.format( + security_group_rule)) + result = self.connection.network.delete_security_group_rule( + security_group_rule) + self.logger.debug( + 'Deleted security group with this result: {0}'.format(result)) + return result + + +class OpenstackRBACPolicy(OpenstackResource): + # SDK documentation link: + # https://bit.ly/2DvKSnI + service_type = 'network' + resource_type = 'rbac_policy' + + def resource_plural(self, openstack_type): + return openstack_type + + def list(self, query=None): + query = query or {} + return self.connection.network.rbac_policies(**query) + + def get(self): + self.logger.debug( + 'Attempting to find this rbac policy: {0}'.format( + self.name if not self.resource_id else self.resource_id)) + rbac_policy = self.connection.network.get_rbac_policy( + self.name if not self.resource_id else self.resource_id) + self.logger.debug( + 'Found rbac policy with this result: {0}'.format( + rbac_policy)) + return rbac_policy + + def create(self): + self.logger.debug('Attempting to create rbac policy ' + 'with these args: {0}'.format(self.config)) + rbac_policy = \ + self.connection.network.create_rbac_policy(**self.config) + self.logger.debug( + 'Created rbac policy with this result: {0}'.format(rbac_policy)) + return rbac_policy + + def delete(self): + rbac_policy = self.get() + self.logger.debug( + 'Attempting to delete this rbac policy: {0}'.format( + rbac_policy)) + result = self.connection.network.delete_rbac_policy(rbac_policy) + self.logger.debug( + 'Deleted rbac policy with this result: {0}'.format(result)) + return result + + def update(self, new_config=None): + rbac_policy = self.get() + self.logger.debug( + 'Attempting to update this rbac policy: {0} with args {1}' + ''.format(rbac_policy, new_config)) + result = self.connection.network.update_rbac_policy( + rbac_policy, **new_config) + self.logger.debug( + 'Updated rbac policy with this result: {0}'.format(result)) + return result diff --git a/openstack_sdk/resources/volume.py b/openstack_sdk/resources/volume.py new file mode 100644 index 00000000..1bbf3bf3 --- /dev/null +++ b/openstack_sdk/resources/volume.py @@ -0,0 +1,174 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Based on this documentation:
+# https://docs.openstack.org/openstacksdk/latest/user/proxies/compute.html.
+
+# Local imports
+from openstack_sdk.common import OpenstackResource
+
+
+class OpenstackVolume(OpenstackResource):
+    service_type = 'volume'
+    resource_type = 'volume'
+
+    def list(self, query=None):
+        query = query or {}
+        return self.connection.block_storage.volumes(**query)
+
+    def get(self):
+        self.logger.debug(
+            'Attempting to find this volume: {0}'.format(
+                self.name if not self.resource_id else self.resource_id))
+        volume = self.connection.block_storage.get_volume(
+            self.name if not self.resource_id else self.resource_id
+        )
+        self.logger.debug(
+            'Found volume with this result: {0}'.format(volume))
+        return volume
+
+    def create(self):
+        self.logger.debug(
+            'Attempting to create volume with these args: {0}'.format(
+                self.config))
+        volume = self.connection.block_storage.create_volume(**self.config)
+        self.logger.debug(
+            'Created volume with this result: {0}'.format(volume))
+        return volume
+
+    def delete(self):
+        volume = self.get()
+        self.logger.debug(
+            'Attempting to delete this volume: {0}'.format(volume))
+        self.connection.block_storage.delete_volume(volume)
+
+
+class OpenstackVolumeType(OpenstackResource):
+    service_type = 'volume'
+    resource_type = 'volume_type'
+
+    def list(self):
+        return self.connection.block_storage.types()
+
+    def get(self):
+        self.logger.debug(
+            'Attempting to find this volume type: {0}'.format(
+                self.name if not self.resource_id else self.resource_id))
+        volume_type = self.connection.block_storage.get_type(
+            self.name if not self.resource_id else self.resource_id
+        )
+        self.logger.debug(
+            'Found volume type with this result: {0}'.format(volume_type))
+        return volume_type
+
+    def create(self):
+        self.logger.debug(
+            'Attempting to create volume type with these args: {0}'.format(
+                self.config))
+        volume_type = self.connection.block_storage.create_type(**self.config)
+        self.logger.debug(
+            'Created volume type with this result: {0}'.format(volume_type))
+        return volume_type
+
+    def delete(self):
+        volume_type = self.get()
+        self.logger.debug(
+            'Attempting to delete this volume type: {0}'.format(volume_type))
+        self.connection.block_storage.delete_type(volume_type)
+
+
+class OpenstackVolumeBackup(OpenstackResource):
+    resource_type = 'backup'
+    service_type = 'volume'
+
+    def list(self, query=None):
+        query = query or {}
+        self.logger.debug('Attempting to list backups')
+        result = self.connection.block_storage.backups(**query)
+        return result
+
+    def get(self):
+        self.logger.debug(
+            'Attempting to find this backup: {0}'.format(
+                self.name if not self.resource_id else self.resource_id))
+        backup = self.connection.block_storage.get_backup(
+            self.name if not self.resource_id else self.resource_id
+        )
+        self.logger.debug(
+            'Found backup with this result: {0}'.format(backup))
+        return backup
+
+    def create(self):
+        self.logger.debug(
+            'Attempting to create backup with these args: {0}'.format(
+                self.config))
+        backup = self.connection.block_storage.create_backup(**self.config)
+        self.logger.debug(
+            'Created backup with this result: {0}'.format(backup))
+        return backup
+
+    def restore(self, backup_id, volume_id, name):
+        self.logger.debug(
+            'Attempting to restore backup: {0} to this volume: {1}'.format(
+                backup_id, volume_id))
+        result = \
+            self.connection.block_storage.restore_backup(backup_id,
+                                                         volume_id,
+                                                         name)
+        self.logger.debug(
+            'Restored backup volume with this result: {0}'.format(result))
+        return result
+
+    def delete(self):
+        backup = self.get()
+        self.logger.debug(
+            'Attempting to delete this backup: {0}'.format(backup))
+        self.connection.block_storage.delete_backup(backup)
+
+
+class OpenstackVolumeSnapshot(OpenstackResource):
+    resource_type = 'snapshot'
+    service_type = 'volume'
+
+    def list(self, query=None):
+        query = query or {}
+        self.logger.debug('Attempting to list snapshots')
+        result = self.connection.block_storage.snapshots(**query)
+        return result
+
+    def get(self):
+        self.logger.debug(
+            'Attempting to find this snapshot: {0}'.format(
+                self.name if not self.resource_id else self.resource_id))
+        snapshot = self.connection.block_storage.get_snapshot(
+            self.name if not self.resource_id else self.resource_id
+        )
+        self.logger.debug(
+            'Found snapshot with this result: {0}'.format(snapshot))
+        return snapshot
+
+    def create(self):
+        self.logger.debug(
+            'Attempting to create snapshot with these args: {0}'.format(
+                self.config))
+        snapshot = \
+            self.connection.block_storage.create_snapshot(**self.config)
+        self.logger.debug(
+            'Created snapshot with this result: {0}'.format(snapshot))
+        return snapshot
+
+    def delete(self):
+        snapshot = self.get()
+        self.logger.debug(
+            'Attempting to delete this snapshot: {0}'.format(snapshot))
+        self.connection.block_storage.delete_snapshot(snapshot)
diff --git a/openstack_sdk/tests/__init__.py b/openstack_sdk/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/openstack_sdk/tests/base.py b/openstack_sdk/tests/base.py
new file mode 100644
index 00000000..013dbe9c
--- /dev/null
+++ b/openstack_sdk/tests/base.py
@@ -0,0 +1,343 @@
+# #######
+# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
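+
+# NOTE: Each _fake_* helper below returns a mocked service proxy whose
+# methods raise openstack.exceptions.SDKException by default (via
+# _gen_openstack_sdk_error). A unit test is expected to override only the
+# proxy call it exercises with mock.MagicMock(return_value=...), so any
+# unexpected SDK call fails loudly instead of silently returning a mock.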
+ +# Standard imports +import unittest +import mock + + +# Third party imports +from openstack import exceptions + + +class OpenStackSDKTestBase(unittest.TestCase): + + def setUp(self): + super(OpenStackSDKTestBase, self).setUp() + self.connection = mock.patch('openstack.connect', mock.MagicMock()) + + def tearDown(self): + super(OpenStackSDKTestBase, self).tearDown() + + def get_openstack_connections(self): + return { + 'server': self._fake_compute_server, + 'server_group': self._fake_compute_server_group, + 'host_aggregate': self._fake_compute_host_aggregate, + 'key_pair': self._fake_compute_key_pair, + 'image': self._fake_image, + 'flavor': self._fake_compute_flavor, + 'port': self._fake_network_port, + 'network': self._fake_network, + 'subnet': self._fake_network_subnet, + 'floating_ip': self._fake_network_floating_ip, + 'router': self._fake_network_router, + 'security_group': self._fake_network_security_group, + 'security_group_rule': self._fake_network_security_group_rule, + 'volume': self._fake_block_storage_volume, + 'volume_attachment': self._fake_compute_volume_attachment, + 'volume_type': self._fake_block_storage_type, + 'backup': self._fake_block_storage_backup, + 'snapshot': self._fake_block_storage_snapshot, + 'user': self._fake_identity_user, + 'project': self._fake_identity_project, + 'rbac_policy': self._fake_network_rbac_policy, + } + + @property + def client_config(self): + return { + 'auth_url': 'test_auth_url', + 'username': 'test_username', + 'password': 'test_password', + 'project_name': 'test_project_name', + 'region_name': 'test_region_name' + } + + def _gen_openstack_sdk_error(self, message='SomeThingIsGoingWrong'): + return mock.MagicMock( + side_effect=exceptions.SDKException(message=message)) + + def generate_fake_openstack_connection(self, service_type): + return self.get_openstack_connections()[service_type]() + + def _fake_compute_server(self): + server_conn = mock.MagicMock() + server_conn.servers = self._gen_openstack_sdk_error() + server_conn.get_server = self._gen_openstack_sdk_error() + server_conn.create_server = self._gen_openstack_sdk_error() + server_conn.delete_server = self._gen_openstack_sdk_error() + server_conn.reboot_server = self._gen_openstack_sdk_error() + server_conn.resume_server = self._gen_openstack_sdk_error() + server_conn.suspend_server = self._gen_openstack_sdk_error() + server_conn.backup_server = self._gen_openstack_sdk_error() + server_conn.rebuild_server = self._gen_openstack_sdk_error() + server_conn.create_server_image = self._gen_openstack_sdk_error() + server_conn.update_server = self._gen_openstack_sdk_error() + server_conn.start_server = self._gen_openstack_sdk_error() + server_conn.stop_server = self._gen_openstack_sdk_error() + server_conn.get_server_password = self._gen_openstack_sdk_error() + server_conn.volume_attachments = self._gen_openstack_sdk_error() + server_conn.get_volume_attachment = self._gen_openstack_sdk_error() + server_conn.create_volume_attachment = self._gen_openstack_sdk_error() + server_conn.delete_volume_attachment = self._gen_openstack_sdk_error() + server_conn.create_server_interface = self._gen_openstack_sdk_error() + server_conn.delete_server_interface = self._gen_openstack_sdk_error() + server_conn.get_server_interface = self._gen_openstack_sdk_error() + server_conn.server_interfaces = self._gen_openstack_sdk_error() + server_conn.add_security_group_to_server = \ + self._gen_openstack_sdk_error() + server_conn.remove_security_group_from_server = \ + self._gen_openstack_sdk_error() + 
server_conn.add_floating_ip_to_server = \ + self._gen_openstack_sdk_error() + server_conn.remove_floating_ip_from_server = \ + self._gen_openstack_sdk_error() + + self.connection.compute = server_conn + return self.connection.compute + + def _fake_compute_host_aggregate(self): + host_aggregate_conn = mock.MagicMock() + host_aggregate_conn.aggregates = self._gen_openstack_sdk_error() + host_aggregate_conn.get_aggregate = self._gen_openstack_sdk_error() + host_aggregate_conn.create_aggregate = self._gen_openstack_sdk_error() + host_aggregate_conn.delete_aggregate = self._gen_openstack_sdk_error() + host_aggregate_conn.update_aggregate = self._gen_openstack_sdk_error() + + self.connection.compute = host_aggregate_conn + return self.connection.compute + + def _fake_compute_server_group(self): + server_group_conn = mock.MagicMock() + server_group_conn.server_groups = self._gen_openstack_sdk_error() + server_group_conn.get_server_group = self._gen_openstack_sdk_error() + server_group_conn.create_server_group = self._gen_openstack_sdk_error() + server_group_conn.delete_server_group = self._gen_openstack_sdk_error() + + self.connection.compute = server_group_conn + return self.connection.compute + + def _fake_compute_key_pair(self): + key_pair_conn = mock.MagicMock() + key_pair_conn.keypairs = self._gen_openstack_sdk_error() + key_pair_conn.get_keypair = self._gen_openstack_sdk_error() + key_pair_conn.create_keypair = self._gen_openstack_sdk_error() + key_pair_conn.delete_keypair = self._gen_openstack_sdk_error() + key_pair_conn.update_keypair = self._gen_openstack_sdk_error() + + self.connection.compute = key_pair_conn + return self.connection.compute + + def _fake_compute_flavor(self): + flavor_conn = mock.MagicMock() + flavor_conn.flavors = self._gen_openstack_sdk_error() + flavor_conn.get_flavor = self._gen_openstack_sdk_error() + flavor_conn.create_flavor = self._gen_openstack_sdk_error() + flavor_conn.delete_flavor = self._gen_openstack_sdk_error() + + self.connection.compute = flavor_conn + return self.connection.compute + + def _fake_compute_volume_attachment(self): + volume_attachment = mock.MagicMock() + volume_attachment.volume_attachments = self._gen_openstack_sdk_error() + + volume_attachment.get_volume_attachment =\ + self._gen_openstack_sdk_error() + + volume_attachment.create_volume_attachment =\ + self._gen_openstack_sdk_error() + + volume_attachment.delete_volume_attachment =\ + self._gen_openstack_sdk_error() + + volume_attachment.update_volume_attachment =\ + self._gen_openstack_sdk_error() + + self.connection.compute = volume_attachment + return self.connection.compute + + def _fake_image(self): + image_conn = mock.MagicMock() + image_conn.images = self._gen_openstack_sdk_error() + image_conn.get_image = self._gen_openstack_sdk_error() + image_conn.upload_image = self._gen_openstack_sdk_error() + image_conn.delete_image = self._gen_openstack_sdk_error() + image_conn.update_image = self._gen_openstack_sdk_error() + + self.connection.image = image_conn + return self.connection.image + + def _fake_network(self): + network_conn = mock.MagicMock() + network_conn.networks = self._gen_openstack_sdk_error() + network_conn.get_network = self._gen_openstack_sdk_error() + network_conn.create_network = self._gen_openstack_sdk_error() + network_conn.delete_network = self._gen_openstack_sdk_error() + network_conn.update_network = self._gen_openstack_sdk_error() + + self.connection.network = network_conn + return self.connection.network + + def _fake_network_subnet(self): + subnet_conn = 
mock.MagicMock() + subnet_conn.subnets = self._gen_openstack_sdk_error() + subnet_conn.get_subnet = self._gen_openstack_sdk_error() + subnet_conn.create_subnet = self._gen_openstack_sdk_error() + subnet_conn.delete_subnet = self._gen_openstack_sdk_error() + subnet_conn.update_subnet = self._gen_openstack_sdk_error() + + self.connection.network = subnet_conn + return self.connection.network + + def _fake_network_port(self): + port_conn = mock.MagicMock() + port_conn.ports = self._gen_openstack_sdk_error() + port_conn.get_port = self._gen_openstack_sdk_error() + port_conn.create_port = self._gen_openstack_sdk_error() + port_conn.delete_port = self._gen_openstack_sdk_error() + port_conn.update_port = self._gen_openstack_sdk_error() + + self.connection.network = port_conn + return self.connection.network + + def _fake_network_router(self): + router_conn = mock.MagicMock() + router_conn.routers = self._gen_openstack_sdk_error() + router_conn.get_router = self._gen_openstack_sdk_error() + router_conn.create_router = self._gen_openstack_sdk_error() + router_conn.delete_router = self._gen_openstack_sdk_error() + router_conn.update_router = self._gen_openstack_sdk_error() + + self.connection.network = router_conn + return self.connection.network + + def _fake_network_floating_ip(self): + floating_ip_conn = mock.MagicMock() + floating_ip_conn.ips = self._gen_openstack_sdk_error() + floating_ip_conn.get_ip = self._gen_openstack_sdk_error() + floating_ip_conn.create_ip = self._gen_openstack_sdk_error() + floating_ip_conn.delete_ip = self._gen_openstack_sdk_error() + floating_ip_conn.update_ip = self._gen_openstack_sdk_error() + + self.connection.network = floating_ip_conn + return self.connection.network + + def _fake_network_security_group(self): + security_group_conn = mock.MagicMock() + + security_group_conn.security_groups = self._gen_openstack_sdk_error() + + security_group_conn.get_security_group =\ + self._gen_openstack_sdk_error() + + security_group_conn.create_security_group =\ + self._gen_openstack_sdk_error() + + security_group_conn.delete_security_group =\ + self._gen_openstack_sdk_error() + + security_group_conn.update_security_group =\ + self._gen_openstack_sdk_error() + + self.connection.network = security_group_conn + return self.connection.network + + def _fake_network_security_group_rule(self): + security_group_rule_conn = mock.MagicMock() + + security_group_rule_conn.security_group_rules =\ + self._gen_openstack_sdk_error() + + security_group_rule_conn.get_security_group_rule = \ + self._gen_openstack_sdk_error() + + security_group_rule_conn.create_security_group_rule = \ + self._gen_openstack_sdk_error() + + security_group_rule_conn.delete_security_group_rule = \ + self._gen_openstack_sdk_error() + + self.connection.network = security_group_rule_conn + return self.connection.network + + def _fake_network_rbac_policy(self): + rbac_policy_conn = mock.MagicMock() + rbac_policy_conn.rbac_policies = self._gen_openstack_sdk_error() + rbac_policy_conn.get_rbac_policy = self._gen_openstack_sdk_error() + rbac_policy_conn.create_rbac_policy = self._gen_openstack_sdk_error() + rbac_policy_conn.delete_rbac_policy = self._gen_openstack_sdk_error() + rbac_policy_conn.update_rbac_policy = self._gen_openstack_sdk_error() + self.connection.network = rbac_policy_conn + return self.connection.network + + def _fake_block_storage_volume(self): + volume_conn = mock.MagicMock() + volume_conn.volumes = self._gen_openstack_sdk_error() + volume_conn.get_volume = self._gen_openstack_sdk_error() + 
volume_conn.create_volume = self._gen_openstack_sdk_error() + volume_conn.delete_volume = self._gen_openstack_sdk_error() + volume_conn.extend_volume = self._gen_openstack_sdk_error() + self.connection.block_storage = volume_conn + return self.connection.block_storage + + def _fake_block_storage_type(self): + type_vol = mock.MagicMock() + type_vol.types = self._gen_openstack_sdk_error() + type_vol.get_type = self._gen_openstack_sdk_error() + type_vol.create_type = self._gen_openstack_sdk_error() + type_vol.delete_type = self._gen_openstack_sdk_error() + self.connection.block_storage = type_vol + return self.connection.block_storage + + def _fake_block_storage_snapshot(self): + snapshot = mock.MagicMock() + snapshot.snapshots = self._gen_openstack_sdk_error() + snapshot.get_snapshot = self._gen_openstack_sdk_error() + snapshot.create_snapshot = self._gen_openstack_sdk_error() + snapshot.delete_snapshot = self._gen_openstack_sdk_error() + self.connection.block_storage = snapshot + return self.connection.block_storage + + def _fake_block_storage_backup(self): + backup = mock.MagicMock() + backup.backups = self._gen_openstack_sdk_error() + backup.get_backup = self._gen_openstack_sdk_error() + backup.create_backup = self._gen_openstack_sdk_error() + backup.delete_backup = self._gen_openstack_sdk_error() + backup.restore_backup = self._gen_openstack_sdk_error() + self.connection.block_storage = backup + return self.connection.block_storage + + def _fake_identity_user(self): + user_conn = mock.MagicMock() + user_conn.users = self._gen_openstack_sdk_error() + user_conn.get_user = self._gen_openstack_sdk_error() + user_conn.create_user = self._gen_openstack_sdk_error() + user_conn.delete_user = self._gen_openstack_sdk_error() + user_conn.update_user = self._gen_openstack_sdk_error() + self.connection.identity = user_conn + return self.connection.identity + + def _fake_identity_project(self): + project_conn = mock.MagicMock() + project_conn.projects = self._gen_openstack_sdk_error() + project_conn.get_project = self._gen_openstack_sdk_error() + project_conn.create_project = self._gen_openstack_sdk_error() + project_conn.delete_project = self._gen_openstack_sdk_error() + project_conn.update_project = self._gen_openstack_sdk_error() + self.connection.identity = project_conn + return self.connection.identity diff --git a/openstack_sdk/tests/compute/__init__.py b/openstack_sdk/tests/compute/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/tests/compute/test_flavor.py b/openstack_sdk/tests/compute/test_flavor.py new file mode 100644 index 00000000..749e05ff --- /dev/null +++ b/openstack_sdk/tests/compute/test_flavor.py @@ -0,0 +1,126 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
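+
+# NOTE: The tests in this module follow a single pattern: build real
+# openstack.compute.v2.flavor resource objects as the expected results,
+# stub the matching fake proxy method with mock.MagicMock(return_value=...),
+# call the OpenstackFlavor wrapper, and assert on what comes back.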
+ +# Standard imports +import mock + +# Third party imports +import openstack.compute.v2.flavor + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import compute + + +class FlavorTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(FlavorTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('flavor') + self.flavor_instance = compute.OpenstackFlavor( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.flavor_instance.connection = self.connection + + def test_get_flavor(self): + flavor = openstack.compute.v2.flavor.Flavor(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor', + 'links': '2', + 'description': 'Testing flavor', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + + }) + self.flavor_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_flavor = mock.MagicMock(return_value=flavor) + + response = self.flavor_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.links, '2') + self.assertEqual(response.name, 'test_flavor') + + def test_list_flavors(self): + flavors = [ + openstack.compute.v2.flavor.FlavorDetail(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor_1', + 'links': '2', + 'description': 'Testing flavor 1', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + }), + openstack.compute.v2.flavor.FlavorDetail(**{ + 'id': 'fg5b5509-c122-4c2f-823e-884bb559afes', + 'name': 'test_flavor_2', + 'links': '3', + 'description': 'Testing flavor 2', + 'os-flavor-access:is_public': True, + 'ram': 4, + 'vcpus': 3, + 'swap': 3 + }) + ] + + self.fake_client.flavors = mock.MagicMock(return_value=flavors) + + response = self.flavor_instance.list() + self.assertEqual(len(response), 2) + + def test_create_flavor(self): + flavor = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'links': '2', + 'name': 'test_flavor', + 'description': 'Testing flavor', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + } + + new_res = openstack.compute.v2.flavor.Flavor(**flavor) + self.flavor_instance.config = flavor + self.fake_client.create_flavor = mock.MagicMock(return_value=new_res) + + response = self.flavor_instance.create() + self.assertEqual(response.id, flavor['id']) + self.assertEqual(response.name, flavor['name']) + + def test_delete_flavor(self): + flavor = openstack.compute.v2.flavor.Flavor(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_flavor', + 'links': '2', + 'description': 'Testing flavor', + 'os-flavor-access:is_public': True, + 'ram': 6, + 'vcpus': 8, + 'swap': 8 + + }) + + self.flavor_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_flavor = mock.MagicMock(return_value=flavor) + self.fake_client.delete_flavor = mock.MagicMock(return_value=None) + + response = self.flavor_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/compute/test_host_aggregate.py b/openstack_sdk/tests/compute/test_host_aggregate.py new file mode 100644 index 00000000..347db1b8 --- /dev/null +++ b/openstack_sdk/tests/compute/test_host_aggregate.py @@ -0,0 +1,134 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.compute.v2.aggregate + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import compute + + +class HostAggregateTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(HostAggregateTestCase, self).setUp() + self.fake_client =\ + self.generate_fake_openstack_connection('host_aggregate') + self.host_aggregate_instance = compute.OpenstackHostAggregate( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.host_aggregate_instance.connection = self.connection + + def test_get_host_aggregate(self): + aggregate = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + self.host_aggregate_instance.name = 'test_aggregate' + self.host_aggregate_instance.id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_aggregate = mock.MagicMock(return_value=aggregate) + + response = self.host_aggregate_instance.get() + self.assertEqual(response.id, 'a34b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_aggregate') + + def test_list_aggregates(self): + aggregate_list = [ + openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate_1', + 'availability_zone': 'test_availability_zone_1', + }), + openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a44b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate_2', + 'availability_zone': 'test_availability_zone_2', + }), + ] + + self.fake_client.aggregates = \ + mock.MagicMock(return_value=aggregate_list) + response = self.host_aggregate_instance.list() + self.assertEqual(len(response), 2) + + def test_create_aggregate(self): + config = { + 'name': 'test_aggregate', + 'availability_zone': 'test_availability_zone', + } + + aggregate = { + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate', + 'availability_zone': 'test_availability_zone', + } + + self.host_aggregate_instance.config = config + new_res = openstack.compute.v2.aggregate.Aggregate(**aggregate) + self.fake_client.create_aggregate = \ + mock.MagicMock(return_value=new_res) + + response = self.host_aggregate_instance.create() + self.assertEqual(response.name, config['name']) + + def test_update_aggregate(self): + old_aggregate = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + new_config = { + 'name': 'update_test_aggregate', + } + + new_aggregate = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'update_test_aggregate', + 'availability_zone': 'test_availability_zone', + }) + + self.host_aggregate_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_aggregate = \ + mock.MagicMock(return_value=old_aggregate) + self.fake_client.update_aggregate =\ + mock.MagicMock(return_value=new_aggregate) + + response 
= self.host_aggregate_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_aggregate.name) + + def test_delete_server(self): + aggregate = openstack.compute.v2.aggregate.Aggregate(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_aggregate', + 'availability_zone': 'test_availability_zone', + + }) + + self.host_aggregate_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_aggregate = mock.MagicMock(return_value=aggregate) + self.fake_client.delete_aggregate = mock.MagicMock(return_value=None) + + response = self.host_aggregate_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/compute/test_keypair.py b/openstack_sdk/tests/compute/test_keypair.py new file mode 100644 index 00000000..cb4db89f --- /dev/null +++ b/openstack_sdk/tests/compute/test_keypair.py @@ -0,0 +1,103 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.compute.v2.keypair + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import compute + + +class KeyPairTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(KeyPairTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('key_pair') + self.keypair_instance = compute.OpenstackKeyPair( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.keypair_instance.connection = self.connection + + def test_get_keypair(self): + keypair = openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair', + 'fingerprint': 'test_fingerprint', + 'public_key': 'test_public_key' + + }) + self.keypair_instance.name = 'test_key_pair' + self.fake_client.get_keypair = mock.MagicMock(return_value=keypair) + + response = self.keypair_instance.get() + self.assertEqual(response.name, 'test_key_pair') + self.assertEqual(response.fingerprint, 'test_fingerprint') + self.assertEqual(response.public_key, 'test_public_key') + + def test_list_keypairs(self): + keypair_list = [ + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_1', + 'fingerprint': 'test_fingerprint_1', + 'public_key': 'test_public_key_1' + }), + openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair_2', + 'fingerprint': 'test_fingerprint_2', + 'public_key': 'test_public_key_2' + }), + ] + + self.fake_client.keypairs = mock.MagicMock(return_value=keypair_list) + + response = self.keypair_instance.list() + self.assertEqual(len(response), 2) + + def test_create_keypair(self): + config = { + 'name': 'test_key_pair', + } + + keypair = { + 'name': 'test_key_pair', + 'fingerprint': 'test_fingerprint_1', + 'public_key': 'test_public_key_1', + 'private_key': 'test_private_key_1' + } + + self.keypair_instance.config = config + new_res = openstack.compute.v2.keypair.Keypair(**keypair) + self.fake_client.create_keypair = mock.MagicMock(return_value=new_res) + + response = 
self.keypair_instance.create() + self.assertEqual(response.name, config['name']) + + def test_delete_flavor(self): + keypair = openstack.compute.v2.keypair.Keypair(**{ + 'name': 'test_key_pair', + 'fingerprint': 'test_fingerprint', + 'public_key': 'test_public_key' + + }) + + self.keypair_instance.resource_id = '2' + self.fake_client.get_keypair = mock.MagicMock(return_value=keypair) + self.fake_client.delete_keypair = mock.MagicMock(return_value=None) + + response = self.keypair_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/compute/test_server.py b/openstack_sdk/tests/compute/test_server.py new file mode 100644 index 00000000..528d63da --- /dev/null +++ b/openstack_sdk/tests/compute/test_server.py @@ -0,0 +1,757 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.compute.v2.server +import openstack.compute.v2.volume_attachment +import openstack.compute.v2.server_interface + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import compute + + +class ServerTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(ServerTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('server') + self.server_instance = compute.OpenstackServer( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.server_instance.connection = self.connection + + def test_get_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.name = 'test_server' + self.server_instance.id = 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + + response = self.server_instance.get() + self.assertEqual(response.id, 'a34b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_server') + + def test_get_password(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.name = 'test_server' + self.server_instance.id = 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.get_server_password = \ + mock.MagicMock(return_value='884bb559afe8') + + password = self.server_instance.get_server_password() + 
self.assertEqual(password, '884bb559afe8') + + def test_list_servers(self): + server_list = [ + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_1', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + openstack.compute.v2.server.ServerDetail(**{ + 'id': 'b24b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server_2', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }), + ] + + self.fake_client.servers = mock.MagicMock(return_value=server_list) + response = self.server_instance.list() + self.assertEqual(len(response), 2) + + def test_create_server(self): + config = { + 'name': 'test_server', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + } + + server = { + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + } + + self.server_instance.config = config + new_res = openstack.compute.v2.server.Server(**server) + self.fake_client.create_server = mock.MagicMock(return_value=new_res) + + response = self.server_instance.create() + self.assertEqual(response.name, config['name']) + + def test_update_server(self): + old_server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }) + + new_config = { + 'name': 'update_test_server', + } + + new_server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'update_test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=old_server) + self.fake_client.update_server =\ + mock.MagicMock(return_value=new_server) + + response = self.server_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_server.name) + + def test_delete_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.resource_id = \ + 
'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.delete_server = mock.MagicMock(return_value=None) + + response = self.server_instance.delete() + self.assertIsNone(response) + + def test_reboot_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.reboot_server = mock.MagicMock(return_value=None) + + response = self.server_instance.reboot(reboot_type='SOFT') + self.assertIsNone(response) + + def test_resume_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.resume_server = mock.MagicMock(return_value=None) + + response = self.server_instance.resume() + self.assertIsNone(response) + + def test_suspend_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.suspend_server = mock.MagicMock(return_value=None) + + response = self.server_instance.suspend() + self.assertIsNone(response) + + def test_backup_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.backup_server = mock.MagicMock(return_value=None) + + response = self.server_instance.backup('test-backup', 'daily', 30) + self.assertIsNone(response) + + def test_rebuild_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': 
'2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.rebuild_server = mock.MagicMock(return_value=None) + + response = self.server_instance.rebuild('12323') + self.assertIsNone(response) + + def test_create_image(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + 'status': 'ACTIVE', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.create_server_image = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.create_image('test-image') + self.assertIsNone(response) + + def test_start_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.start_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.start() + self.assertIsNone(response) + + def test_stop_server(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.stop_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.stop() + self.assertIsNone(response) + + def test_get_server_password(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + password = 'rasmuslerdorf' + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.get_server_password = \ + mock.MagicMock(return_value=password) + + response = self.server_instance.get_server_password() + self.assertEqual(response, password) + + def test_list_volume_attachments(self): + server = 
openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + volume_attachments = [ + openstack.compute.v2.volume_attachment.VolumeAttachment(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'server_id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'volume_id': '1', + 'attachment_id': '1', + }) + ] + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.volume_attachments = \ + mock.MagicMock(return_value=volume_attachments) + + response = self.server_instance.list_volume_attachments() + self.assertEqual(len(response), 1) + + def test_get_volume_attachment(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + volume_attachment = \ + openstack.compute.v2.volume_attachment.VolumeAttachment(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'server_id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'volume_id': '3', + 'attachment_id': '4', + }) + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.get_volume_attachment = \ + mock.MagicMock(return_value=volume_attachment) + + response = self.server_instance.get_volume_attachment('4') + self.assertEqual(response.id, 'a34b5509-d122-4d2f-823e-884bb559afe8') + self.assertEqual(response.server_id, + 'a34b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.volume_id, '3') + self.assertEqual(response.attachment_id, '4') + + def test_create_volume_attachment(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + volume_config = { + 'volume_id': '1', + 'device': 'auto' + } + volume_attachment = \ + openstack.compute.v2.volume_attachment.VolumeAttachment(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'server_id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'volume_id': '3', + 'attachment_id': '4', + }) + self.server_instance.resource_id = \ + 'a34b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.create_volume_attachment = \ + mock.MagicMock(return_value=volume_attachment) + + response = self.server_instance.create_volume_attachment(volume_config) + self.assertEqual(response.id, 'a34b5509-d122-4d2f-823e-884bb559afe8') + self.assertEqual(response.server_id, + 'a34b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.volume_id, '3') + self.assertEqual(response.attachment_id, '4') + + def test_delete_volume_attachment(self): + server = 
openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.delete_volume_attachment = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.delete_volume_attachment( + 'a34b5509-d122-4d2f-823e-884bb559afe8') + self.assertIsNone(response) + + def test_list_server_interfaces(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + server_interfaces = [ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe5', + 'net_id': 'a34b5509-d122-4d2f-823e-884bb559afe6', + 'port_id': 'a34b5509-d122-4d2f-823e-884bb559afe7', + 'server_id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + }), + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe4', + 'net_id': 'a34b5509-d122-4d2f-823e-884bb559afe2', + 'port_id': 'a34b5509-d122-4d2f-823e-884bb559afe1', + 'server_id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + }) + ] + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.server_interfaces = \ + mock.MagicMock(return_value=server_interfaces) + + response = self.server_instance.server_interfaces() + self.assertEqual(len(response), 2) + + def test_get_server_interface(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + server_interface = \ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe5', + 'net_id': 'a34b5509-d122-4d2f-823e-884bb559afe6', + 'port_id': 'a34b5509-d122-4d2f-823e-884bb559afe7', + 'server_id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + }) + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.get_server_interface = \ + mock.MagicMock(return_value=server_interface) + + response = self.server_instance.get_server_interface( + 'a34b5509-d122-4d2f-823e-884bb559afe5') + self.assertEqual(response.id, 'a34b5509-d122-4d2f-823e-884bb559afe5') + self.assertEqual(response.net_id, + 'a34b5509-d122-4d2f-823e-884bb559afe6') + self.assertEqual(response.port_id, + 'a34b5509-d122-4d2f-823e-884bb559afe7') + self.assertEqual(response.server_id, + 'a34b5509-d122-4d2f-823e-884bb559afe8') + + def test_create_server_interface(self): + server = 
openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + interface_config = { + 'port_id': 'a34b5509-d122-4d2f-823e-884bb559afa7', + } + server_interface = \ + openstack.compute.v2.server_interface.ServerInterface(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe5', + 'net_id': 'a34b5509-d122-4d2f-823e-884bb559afe6', + 'port_id': 'a34b5509-d122-4d2f-823e-884bb559afa7', + 'server_id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + }) + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.create_server_interface = \ + mock.MagicMock(return_value=server_interface) + + response = \ + self.server_instance.create_server_interface(interface_config) + self.assertEqual(response.id, 'a34b5509-d122-4d2f-823e-884bb559afe5') + self.assertEqual(response.server_id, + 'a34b5509-d122-4d2f-823e-884bb559afe8') + self.assertEqual(response.port_id, + 'a34b5509-d122-4d2f-823e-884bb559afa7') + + def test_delete_server_interface(self): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server = mock.MagicMock(return_value=server) + self.fake_client.delete_server_interface = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.delete_server_interface( + 'a34b5509-d122-4d2f-823e-884bb559afe5') + self.assertIsNone(response) + + def test_add_security_group_to_server(self): + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.add_security_group_to_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.add_security_group_to_server( + 'a34b5509-d122-4d2f-823e-884bb559afe4') + self.assertIsNone(response) + + def test_remove_security_group_from_server(self): + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.remove_security_group_from_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.remove_security_group_from_server( + 'a34b5509-d122-4d2f-823e-884bb559afe4') + self.assertIsNone(response) + + def test_add_floating_ip_to_server(self): + self.server_instance.resource_id = \ + '1a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.add_floating_ip_to_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.add_floating_ip_to_server( + 'a34b5509-d122-4d2f-823e-884bb559afe2') + self.assertIsNone(response) + + def test_remove_floating_ip_from_server(self): + self.server_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.remove_floating_ip_from_server = \ + mock.MagicMock(return_value=None) + + response = self.server_instance.remove_floating_ip_from_server( + 'a34b5509-d122-4d2f-823e-884bb559afe2') + self.assertIsNone(response) 
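The server tests above repeat one stubbing recipe: build an `openstack.compute.v2.server.Server` from plain kwargs, attach it to a fake client as the return value of a `mock.MagicMock`, point `resource_id` at the canned id, and assert on the result. A minimal standalone sketch of that recipe follows; it is an editor's illustration rather than part of the patch, and it assumes only the `mock` and `openstacksdk` packages the tests already import (the plugin's own server wrapper is deliberately left out).

```python
# Standalone sketch of the stubbing pattern used by the server tests:
# construct an SDK resource from kwargs, return it from a MagicMock
# "client", and verify the call path without touching a real cloud.
import mock
import openstack.compute.v2.server

fake_client = mock.MagicMock()
server = openstack.compute.v2.server.Server(**{
    'id': 'a34b5509-c122-4c2f-823e-884bb559afe8',
    'name': 'test_server',
})
fake_client.get_server = mock.MagicMock(return_value=server)

# Code under test that calls fake_client.get_server(...) now receives the
# canned Server object instead of performing an HTTP request.
assert fake_client.get_server(server.id).name == 'test_server'
fake_client.get_server.assert_called_once_with(
    'a34b5509-c122-4c2f-823e-884bb559afe8')
```

Because every assertion runs against the canned resource, these tests exercise only the plugin's delegation logic, never the OpenStack API itself.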
diff --git a/openstack_sdk/tests/compute/test_server_group.py b/openstack_sdk/tests/compute/test_server_group.py new file mode 100644 index 00000000..afe51b78 --- /dev/null +++ b/openstack_sdk/tests/compute/test_server_group.py @@ -0,0 +1,124 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.compute.v2.server_group + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import compute + + +class ServerGroupTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(ServerGroupTestCase, self).setUp() + + self.fake_client =\ + self.generate_fake_openstack_connection('server_group') + + self.server_group_instance = compute.OpenstackServerGroup( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.server_group_instance.connection = self.connection + + def test_get_server_group(self): + server_group = openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + + }) + + self.server_group_instance.name = 'test_server_group' + self.server_group_instance.id = 'a34b5509-d122-4d2f-823e-884bb559afe8' + self.fake_client.get_server_group =\ + mock.MagicMock(return_value=server_group) + + response = self.server_group_instance.get() + self.assertEqual(response.id, 'a34b5509-d122-4d2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_server_group') + + def test_list_server_groups(self): + server_group_list = [ + openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe7', + 'name': 'test_server_group', + 'members': ['server2', 'server3'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + }), + ] + + self.fake_client.server_groups =\ + mock.MagicMock(return_value=server_group_list) + response = self.server_group_instance.list() + self.assertEqual(len(response), 2) + + def test_create_server_group(self): + config = { + 'name': 'test_server_group', + 'policies': ['anti-affinity'], + } + + server_group = { + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + } + + self.server_group_instance.config = config + new_res = openstack.compute.v2.server_group.ServerGroup(**server_group) + self.fake_client.create_server_group =\ + mock.MagicMock(return_value=new_res) + + response = self.server_group_instance.create() + self.assertEqual(response.name, config['name']) + + def test_delete_server_group(self): + server_group = 
openstack.compute.v2.server_group.ServerGroup(**{ + 'id': 'a34b5509-d122-4d2f-823e-884bb559afe8', + 'name': 'test_server_group', + 'members': ['server1', 'server2'], + 'metadata': {'k': 'v'}, + 'policies': ['anti-affinity'], + + }) + + self.server_group_instance.resource_id = \ + 'a34b5509-d122-4d2f-823e-884bb559afe8' + + self.fake_client.get_server_group = \ + mock.MagicMock(return_value=server_group) + + self.fake_client.delete_server_group =\ + mock.MagicMock(return_value=None) + + response = self.server_group_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/identity/__init__.py b/openstack_sdk/tests/identity/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/tests/identity/test_project.py b/openstack_sdk/tests/identity/test_project.py new file mode 100644 index 00000000..3ee37974 --- /dev/null +++ b/openstack_sdk/tests/identity/test_project.py @@ -0,0 +1,168 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.identity.v3.project + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import identity + + +class ProjectTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(ProjectTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('project') + self.project_instance = identity.OpenstackProject( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.project_instance.connection = self.connection + + def test_get_project(self): + project = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + self.project_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_project = mock.MagicMock(return_value=project) + + response = self.project_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_project') + self.assertEqual(response.domain_id, 'test_domain_id') + + def test_list_projects(self): + projects = [ + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project_1', + 'description': 'Testing Project 1', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_project_1', + 'description': 'Testing Project 1', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }), + ] + + self.fake_client.projects = 
mock.MagicMock(return_value=projects) + response = self.project_instance.list() + self.assertEqual(len(response), 2) + + def test_create_project(self): + project = { + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'tags': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + } + + new_res = openstack.identity.v3.project.Project(**project) + self.project_instance.config = project + self.fake_client.create_project = mock.MagicMock(return_value=new_res) + + response = self.project_instance.create() + self.assertEqual(response.name, project['name']) + self.assertEqual(response.description, project['description']) + + def test_update_project(self): + old_project = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + + new_config = { + 'name': 'test_project_updated', + 'domain_id': 'test_updated_domain_id', + 'description': 'Testing Project 1', + 'enabled': False, + 'is_domain': False, + } + + new_project = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project_updated', + 'description': 'Testing Project 1', + 'domain_id': 'test_updated_domain_id', + 'enabled': False, + 'is_domain': False, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + + }) + + self.project_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_project = mock.MagicMock(return_value=old_project) + self.fake_client.update_project =\ + mock.MagicMock(return_value=new_project) + + response = self.project_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_project.name) + self.assertNotEqual(response.is_enabled, old_project.is_enabled) + self.assertNotEqual(response.description, old_project.description) + self.assertNotEqual(response.is_domain, old_project.is_domain) + + def test_delete_project(self): + project = openstack.identity.v3.project.Project(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_project', + 'description': 'Testing Project', + 'domain_id': 'test_domain_id', + 'enabled': True, + 'is_domain': True, + 'links': ['test1', 'test2'], + 'parent_id': 'test_parent_id' + }) + + self.project_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_project = mock.MagicMock(return_value=project) + self.fake_client.delete_project = mock.MagicMock(return_value=None) + + response = self.project_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/identity/test_user.py b/openstack_sdk/tests/identity/test_user.py new file mode 100644 index 00000000..3b933882 --- /dev/null +++ b/openstack_sdk/tests/identity/test_user.py @@ -0,0 +1,132 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.identity.v2.user + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import identity + + +class UserTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(UserTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('user') + self.user_instance = identity.OpenstackUser( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.user_instance.connection = self.connection + + def test_get_user(self): + user = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + self.user_instance.resource_id = 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_user = mock.MagicMock(return_value=user) + + response = self.user_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_user') + + def test_list_users(self): + users = [ + openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user_1', + 'is_enabled': True, + 'email': 'test1_email@test.com', + }), + openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_user_2', + 'is_enabled': True, + 'email': 'test2_email@test.com', + }), + ] + + self.fake_client.users = mock.MagicMock(return_value=users) + + response = self.user_instance.list() + self.assertEqual(len(response), 2) + + def test_create_user(self): + user = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user_1', + 'is_enabled': True, + 'email': 'test1_email@test.com', + } + + new_res = openstack.identity.v2.user.User(**user) + self.user_instance.config = user + self.fake_client.create_user = mock.MagicMock(return_value=new_res) + + response = self.user_instance.create() + self.assertEqual(response.name, user['name']) + + def test_update_user(self): + old_user = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user_1', + 'is_enabled': True, + 'email': 'test1_email@test.com', + + }) + + new_config = { + 'name': 'test_updated_name', + 'is_enabled': False, + } + + new_user = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_updated_name', + 'is_enabled': False, + 'email': 'test1_email@test.com', + + }) + + self.user_instance.resource_id = 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_user = mock.MagicMock(return_value=old_user) + self.fake_client.update_user = mock.MagicMock(return_value=new_user) + + response = self.user_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_user.name) + self.assertNotEqual(response.is_enabled, old_user.is_enabled) + + def test_delete_user(self): + user = openstack.identity.v2.user.User(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_user', + 'is_enabled': True, + 'email': 'test_email@test.com', + + }) + + self.user_instance.resource_id = 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_user = mock.MagicMock(return_value=user) + self.fake_client.delete_user = mock.MagicMock(return_value=None) + + response = self.user_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/image/__init__.py 
b/openstack_sdk/tests/image/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/tests/image/test_image.py b/openstack_sdk/tests/image/test_image.py new file mode 100644 index 00000000..e0c07bd8 --- /dev/null +++ b/openstack_sdk/tests/image/test_image.py @@ -0,0 +1,151 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.image.v2.image + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import images + + +class ImageTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(ImageTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('image') + self.image_instance = images.OpenstackImage( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.image_instance.connection = self.connection + + def test_get_image(self): + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_image', + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + + }) + self.image_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_image = mock.MagicMock(return_value=image) + + response = self.image_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_image') + self.assertEqual(response.container_format, 'test_bare') + + def test_list_images(self): + image_list = [ + openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_image_1', + 'container_format': 'test_bare', + 'disk_format': 'test_format_1', + 'checksum': '6d8f1c8cf05e1fbdc8b543fda1a9fa7f', + 'size': 258540032 + }), + openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_image_2', + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '4d421c8cf05e1fbdc8b543ded23dfsaf', + 'size': 223540032 + }) + ] + + self.fake_client.images = mock.MagicMock(return_value=image_list) + + response = self.image_instance.list() + self.assertEqual(len(response), 2) + + def test_create_image(self): + image = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_image_2', + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '4d421c8cf05e1fbdc8b543ded23dfsaf', + 'size': 223540032, + } + new_res = openstack.image.v2.image.Image(**image) + self.image_instance.config = image + self.fake_client.upload_image = mock.MagicMock(return_value=new_res) + + response = self.image_instance.create() + self.assertEqual(response.id, image['id']) + self.assertEqual(response.name, image['name']) + + def test_update_image(self): + old_image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_image_2', 
+ 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '4d421c8cf05e1fbdc8b543ded23dfsaf', + 'size': 223540032, + 'visibility': True + + }) + + new_config = { + 'visibility': False + } + + new_image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_image_2', + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '4d421c8cf05e1fbdc8b543ded23dfsaf', + 'size': 223540032, + 'visibility': False + + }) + + self.image_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe7' + self.fake_client.get_image = mock.MagicMock(return_value=old_image) + self.fake_client.update_image = mock.MagicMock(return_value=new_image) + + response = self.image_instance.update(new_config=new_config) + self.assertNotEqual(response.visibility, old_image.visibility) + + def test_delete_image(self): + image = openstack.image.v2.image.Image(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_image_2', + 'container_format': 'test_bare', + 'disk_format': 'test_format', + 'checksum': '4d421c8cf05e1fbdc8b543ded23dfsaf', + 'size': 223540032, + 'visibility': True + + }) + + self.image_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe7' + self.fake_client.get_image = mock.MagicMock(return_value=image) + self.fake_client.delete_image = mock.MagicMock(return_value=None) + + response = self.image_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/__init__.py b/openstack_sdk/tests/network/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/tests/network/test_floating_ip.py b/openstack_sdk/tests/network/test_floating_ip.py new file mode 100644 index 00000000..ca60734a --- /dev/null +++ b/openstack_sdk/tests/network/test_floating_ip.py @@ -0,0 +1,237 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.floating_ip + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class FloatingIPTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(FloatingIPTestCase, self).setUp() + + self.fake_client =\ + self.generate_fake_openstack_connection('floating_ip') + + self.floating_ip_instance = networks.OpenstackFloatingIP( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.floating_ip_instance.connection = self.connection + + def test_get_floating_ip(self): + floating_ip = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description', + 'name': '127.0.0.1', + 'created_at': '0', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + self.floating_ip_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_ip = mock.MagicMock(return_value=floating_ip) + + response = self.floating_ip_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, '127.0.0.1') + self.assertEqual(response.description, 'test_description') + + def test_list_floating_ips(self): + ips = [ + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description_1', + 'name': 'test_name_1', + 'created_at': '0', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }), + openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'description': 'test_description_2', + 'name': 'test_name_2', + 'created_at': '0', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + ] + + self.fake_client.ips = mock.MagicMock(return_value=ips) + + response = self.floating_ip_instance.list() + self.assertEqual(len(response), 2) + + def test_create_floating_ip(self): + floating_ip = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description', + 'name': '127.0.0.1', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'subnet_id': '14', + 'tags': ['15', '16'] + } + + new_res = openstack.network.v2.floating_ip.FloatingIP(**floating_ip) + self.floating_ip_instance.config = floating_ip + self.fake_client.create_ip = mock.MagicMock(return_value=new_res) + + response = self.floating_ip_instance.create() + self.assertEqual(response.name, floating_ip['name']) + self.assertEqual(response.description, floating_ip['description']) + + 
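The update tests that follow here (and in the other network test modules) share a second pattern: stub the `get_*` call with the original resource, stub the `update_*` call with the modified one, then `assertNotEqual` on each field the new config touches. Below is a short standalone sketch of that pattern, offered as an editor's illustration rather than part of the patch; it assumes only `mock` and `openstacksdk` and deliberately leaves out the plugin's `OpenstackFloatingIP` wrapper.

```python
# Standalone sketch of the update-test pattern: old resource from get_*,
# new resource from update_*, then assert that the changed field differs.
import unittest

import mock
import openstack.network.v2.floating_ip


class UpdatePatternSketch(unittest.TestCase):

    def test_update_pattern(self):
        old_ip = openstack.network.v2.floating_ip.FloatingIP(
            id='a95b5509-c122-4c2f-823e-884bb559afe8',
            description='test_description')
        new_ip = openstack.network.v2.floating_ip.FloatingIP(
            id='a95b5509-c122-4c2f-823e-884bb559afe8',
            description='test_description_update')

        fake_client = mock.MagicMock()
        fake_client.get_ip = mock.MagicMock(return_value=old_ip)
        fake_client.update_ip = mock.MagicMock(return_value=new_ip)

        # The code under test would fetch the resource and push the new
        # config; the fake client simply hands back the prepared objects.
        fetched = fake_client.get_ip(old_ip.id)
        updated = fake_client.update_ip(
            fetched, description='test_description_update')
        self.assertNotEqual(updated.description, fetched.description)


if __name__ == '__main__':
    unittest.main()
```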
def test_update_floating_ip(self): + old_ip = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description', + 'name': 'test_name_1', + 'created_at': '0', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + + new_config = { + 'port_id': '7', + 'fixed_ip_address': '2', + 'description': 'test_description_update', + } + + new_ip = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description_update', + 'name': 'test_name', + 'created_at': '0', + 'fixed_ip_address': '2', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '7', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + + self.floating_ip_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_ip = mock.MagicMock(return_value=old_ip) + self.fake_client.update_ip = mock.MagicMock(return_value=new_ip) + + response = self.floating_ip_instance.update(new_config=new_config) + self.assertNotEqual(response.description, old_ip.description) + self.assertNotEqual(response.fixed_ip_address, old_ip.fixed_ip_address) + self.assertNotEqual(response.port_id, old_ip.port_id) + + def test_delete_floating_ip(self): + floating_ip = openstack.network.v2.floating_ip.FloatingIP(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'description': 'test_description', + 'name': 'test_name', + 'created_at': '0', + 'fixed_ip_address': '1', + 'floating_ip_address': '127.0.0.1', + 'floating_network_id': '3', + 'port_id': '5', + 'qos_policy_id': '51', + 'tenant_id': '6', + 'router_id': '7', + 'dns_domain': '9', + 'dns_name': '10', + 'status': 'ACTIVE', + 'revision_number': 12, + 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'] + + }) + + self.floating_ip_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_ip = mock.MagicMock(return_value=floating_ip) + self.fake_client.delete_ip = mock.MagicMock(return_value=None) + + response = self.floating_ip_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_network.py b/openstack_sdk/tests/network/test_network.py new file mode 100644 index 00000000..a294e014 --- /dev/null +++ b/openstack_sdk/tests/network/test_network.py @@ -0,0 +1,285 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.network + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class NetworkTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(NetworkTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('network') + self.network_instance = networks.OpenstackNetwork( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.network_instance.connection = self.connection + + def test_get_network(self): + net = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'description': '4', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + self.network_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_network = mock.MagicMock(return_value=net) + + response = self.network_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_network') + self.assertEqual(response.is_router_external, True) + + def test_list_networks(self): + nets = [ + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network_1', + 'description': 'test_description_1', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }), + openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_network_2', + 'description': 'test_description_2', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + ] + + self.fake_client.networks = 
mock.MagicMock(return_value=nets) + response = self.network_instance.list() + self.assertEqual(len(response), 2) + + def test_create_network(self): + net = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network', + 'description': 'test_description', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'project_id': '10', + } + + new_res = openstack.network.v2.network.Network(**net) + self.network_instance.config = net + self.fake_client.create_network = mock.MagicMock(return_value=new_res) + + response = self.network_instance.create() + self.assertEqual(response.name, net['name']) + self.assertEqual(response.description, net['description']) + + def test_update_network(self): + old_network = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network', + 'description': 'test_description', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + + new_config = { + 'name': 'test_network_updated', + 'description': 'test_description_updated', + 'mtu': 10, + 'admin_state_up': False, + } + + new_network = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network_updated', + 'description': 'test_description_updated', + 'admin_state_up': False, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'dns_domain': '5', + 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 10, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + + self.network_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_network = mock.MagicMock(return_value=old_network) + self.fake_client.update_network = \ + mock.MagicMock(return_value=new_network) + + response = self.network_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_network.name) + self.assertNotEqual(response.description, old_network.description) + self.assertNotEqual(response.mtu, old_network.mtu) + self.assertNotEqual(response.is_admin_state_up, + old_network.is_admin_state_up) + + def test_delete_network(self): + net = openstack.network.v2.network.Network(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_network', + 'description': 'test_description', + 'admin_state_up': True, + 'availability_zone_hints': ['1', '2'], + 'availability_zones': ['3'], + 'external': True, + 'created_at': '2016-03-09T12:14:57.233772', + 'dns_domain': '5', 
+ 'ipv4_address_scope': '6', + 'ipv6_address_scope': '7', + 'is_default': False, + 'mtu': 8, + 'port_security_enabled': True, + 'project_id': '10', + 'provider:network_type': '11', + 'provider:physical_network': '12', + 'provider:segmentation_id': '13', + 'qos_policy_id': '14', + 'revision_number': 15, + 'router:external': True, + 'segments': '16', + 'shared': True, + 'status': '17', + 'subnets': ['18', '19'], + 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + + }) + + self.network_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_network = mock.MagicMock(return_value=net) + self.fake_client.delete_network = mock.MagicMock(return_value=None) + + response = self.network_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_port.py b/openstack_sdk/tests/network/test_port.py new file mode 100644 index 00000000..c05aae0f --- /dev/null +++ b/openstack_sdk/tests/network/test_port.py @@ -0,0 +1,299 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.port + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class PortTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(PortTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('port') + self.port_instance = networks.OpenstackPort( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.port_instance.connection = self.connection + + def test_get_port(self): + port = openstack.network.v2.port.Port(**{ + 'id': '1', + 'name': 'test_port', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'description': '8', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': '11'}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': '13'}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': '22', + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + }) + self.port_instance.resource_id = '1' + self.fake_client.get_port = mock.MagicMock(return_value=port) + + response = self.port_instance.get() + self.assertEqual(response.id, '1') + self.assertEqual(response.name, 'test_port') + self.assertEqual(response.is_admin_state_up, True) + self.assertEqual(response.binding_host_id, '3') + self.assertEqual(response.binding_profile, {'4': 4}) + + def test_list_ports(self): + ports = [ + openstack.network.v2.port.Port(**{ + 'id': 
'1', + 'name': 'test_port_1', + 'description': 'test_port_description_1', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + + }), + openstack.network.v2.port.Port(**{ + 'id': '2', + 'name': 'test_port_2', + 'description': 'test_port_description_2', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': '11'}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': '13'}], + 'fixed_ips': [{'10.0.0.5': '10.0.0.6'}], + 'allowed_address_pairs': ['10.0.0.7', '10.0.0.9'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + + }) + ] + + self.fake_client.ports = mock.MagicMock(return_value=ports) + response = self.port_instance.list() + self.assertEqual(len(response), 2) + + def test_create_port(self): + port = { + 'name': 'test_port', + 'admin_state_up': True, + 'description': 'test_port_description', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': '11'}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': '13'}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'security_groups': ['23'], + 'tenant_id': '26', + } + + new_res = openstack.network.v2.port.Port(**port) + self.port_instance.config = port + self.fake_client.create_port = mock.MagicMock(return_value=new_res) + + response = self.port_instance.create() + self.assertEqual(response.name, port['name']) + self.assertEqual(response.description, port['description']) + + def test_update_port(self): + old_port = openstack.network.v2.port.Port(**{ + 'id': '1', + 'name': 'test_port', + 'description': 'test_port_description', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': '4'}, + 'binding_vif_details': {'5': '5'}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': '13'}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 
'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + + }) + + new_config = { + 'name': 'test_port_updated', + 'description': 'test_port_description_updated', + 'dns_domain': '123', + 'admin_state_up': False, + } + + new_port = openstack.network.v2.port.Port(**{ + 'id': '1', + 'name': 'test_port_updated', + 'description': 'test_port_description_updated', + 'admin_state_up': False, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': '123', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + + }) + + self.port_instance.resource_id = '1' + self.fake_client.get_port = mock.MagicMock(return_value=old_port) + self.fake_client.update_port = \ + mock.MagicMock(return_value=new_port) + + response = self.port_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_port.name) + self.assertNotEqual(response.description, old_port.description) + self.assertNotEqual(response.dns_domain, old_port.dns_domain) + self.assertNotEqual(response.is_admin_state_up, + old_port.is_admin_state_up) + + def test_delete_port(self): + port = openstack.network.v2.port.Port(**{ + 'id': '1', + 'name': 'test_port', + 'description': 'test_port_description', + 'admin_state_up': True, + 'binding_host_id': '3', + 'binding_profile': {'4': 4}, + 'binding_vif_details': {'5': 5}, + 'binding_vif_type': '6', + 'binding_vnic_type': '7', + 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', + 'device_id': '9', + 'device_owner': '10', + 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', + 'dns_name': '12', + 'extra_dhcp_opts': [{'13': 13}], + 'fixed_ips': [{'10.0.0.1': '10.0.0.2'}], + 'allowed_address_pairs': ['10.0.0.3', '10.0.0.4'], + 'mac_address': '00-14-22-01-23-45', + 'network_id': '18', + 'port_security_enabled': True, + 'qos_policy_id': '21', + 'revision_number': 22, + 'security_groups': ['23'], + 'status': '25', + 'tenant_id': '26', + 'updated_at': '2016-07-09T12:14:57.233772', + + }) + + self.port_instance.resource_id = '1' + self.fake_client.get_port = mock.MagicMock(return_value=port) + self.fake_client.delete_port = mock.MagicMock(return_value=None) + + response = self.port_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_rbac_policy.py b/openstack_sdk/tests/network/test_rbac_policy.py new file mode 100644 index 00000000..d67b90bc --- /dev/null +++ b/openstack_sdk/tests/network/test_rbac_policy.py @@ -0,0 +1,162 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.rbac_policy + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class RBACPolicyTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(RBACPolicyTestCase, self).setUp() + + self.fake_client = \ + self.generate_fake_openstack_connection('rbac_policy') + + self.rbac_policy_instance = networks.OpenstackRBACPolicy( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.rbac_policy_instance.connection = self.connection + + def test_get_rbac_policy(self): + sg = openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': None, + 'target_project_id': 'test_target_project_id', + 'object_type': 'test_object_type', + 'object_id': 3, + 'location': None, + 'action': 'test_action', + 'project_id': 4 + }) + self.rbac_policy_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_rbac_policy = mock.MagicMock(return_value=sg) + + response = self.rbac_policy_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.target_project_id, 'test_target_project_id') + + def test_list_rbac_policies(self): + policies = [ + openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': None, + 'target_project_id': 'test_target_project_id_1', + 'object_type': 'test_object_type_1', + 'object_id': 3, + 'location': None, + 'action': 'test_action_1', + 'project_id': 4 + + }), + openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': None, + 'target_project_id': 'test_target_project_id_2', + 'object_type': 'test_object_type_2', + 'object_id': 4, + 'location': None, + 'action': 'test_action_2', + 'project_id': 5, + }) + ] + + self.fake_client.rbac_policies = \ + mock.MagicMock(return_value=policies) + response = self.rbac_policy_instance.list() + self.assertEqual(len(response), 2) + + def test_create_rbac_policy(self): + policy = \ + { + 'target_tenant': 'test_target_tenant', + 'object_type': 'test_object_type', + 'object_id': 1, + 'action': 'test_action', + } + + new_res = openstack.network.v2.rbac_policy.RBACPolicy(**policy) + self.rbac_policy_instance.config = policy + self.fake_client.create_rbac_policy = \ + mock.MagicMock(return_value=new_res) + + response = self.rbac_policy_instance.create() + self.assertEqual(response.object_id, policy['object_id']) + self.assertEqual(response.object_type, policy['object_type']) + + def test_update_rbac_policy(self): + old_policy = openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'target_tenant': 'test_target_tenant', + 'object_type': 'test_object_type', + 'object_id': 1, + 'action': 'test_action', + }) + + new_config = { + 'target_tenant': 'test_target_tenant_update', + } + + new_policy = openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': None, + 'target_project_id': 
'test_target_tenant_update', + 'object_type': 'test_object_type', + 'object_id': 3, + 'location': None, + 'action': 'test_action', + 'project_id': 3, + + }) + + self.rbac_policy_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_rbac_policy = \ + mock.MagicMock(return_value=old_policy) + self.fake_client.update_rbac_policy =\ + mock.MagicMock(return_value=new_policy) + + response = self.rbac_policy_instance.update(new_config=new_config) + self.assertNotEqual(response.target_project_id, + old_policy.target_project_id) + + def test_delete_rbac_policy(self): + policy = openstack.network.v2.rbac_policy.RBACPolicy(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': None, + 'target_project_id': 'test_target_project_id_1', + 'object_type': 'test_object_type_1', + 'object_id': 3, + 'location': None, + 'action': 'test_action_1', + 'project_id': 4 + + }) + + self.rbac_policy_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_rbac_policy = mock.MagicMock(return_value=policy) + self.fake_client.delete_rbac_policy = mock.MagicMock(return_value=None) + + response = self.rbac_policy_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_router.py b/openstack_sdk/tests/network/test_router.py new file mode 100644 index 00000000..57990f26 --- /dev/null +++ b/openstack_sdk/tests/network/test_router.py @@ -0,0 +1,271 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.router + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class RouterTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(RouterTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('router') + self.router_instance = networks.OpenstackRouter( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.router_instance.connection = self.connection + + def test_get_router(self): + router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + self.router_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_router = mock.MagicMock(return_value=router) + + response = self.router_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_name') + self.assertEqual(response.flavor_id, '5') + + def test_list_routers(self): + routers = [ + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_1', + 'description': 'test_description_1', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }), + openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_name_2', + 'description': 'test_description_2', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + ] + + self.fake_client.routers = mock.MagicMock(return_value=routers) + response = self.router_instance.list() + self.assertEqual(len(response), 2) + + def test_create_router(self): + router = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'distributed': False, + 'flavor_id': '5', + 'ha': False, + 'routes': ['8'], + 'tenant_id': '10', + } + + new_res = openstack.network.v2.router.Router(**router) + self.router_instance.config = router + self.fake_client.create_router = mock.MagicMock(return_value=new_res) + + response = self.router_instance.create() + self.assertEqual(response.name, router['name']) + self.assertEqual(response.description, router['description']) + + def test_update_router(self): + old_router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 
'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + + new_config = { + 'name': 'test_name_update', + 'description': 'test_description_update', + 'distributed': True + } + + new_router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_update', + 'description': 'test_description_update', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': True, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + + self.router_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_router = mock.MagicMock(return_value=old_router) + self.fake_client.update_router = \ + mock.MagicMock(return_value=new_router) + + response = self.router_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_router.name) + self.assertNotEqual(response.description, old_router.description) + self.assertNotEqual(response.is_distributed, old_router.is_distributed) + + def test_delete_router(self): + router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + + self.router_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_router = mock.MagicMock(return_value=router) + self.fake_client.delete_router = mock.MagicMock(return_value=None) + + response = self.router_instance.delete() + self.assertIsNone(response) + + def test_add_interface_router(self): + router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + + self.router_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_router = mock.MagicMock(return_value=router) + self.fake_client.add_interface_to_router = \ + mock.MagicMock(return_value=None) + + response = self.router_instance.add_interface({'subnet_id': 'abc'}) + self.assertIsNone(response) + + def test_remove_interface_router(self): + router = openstack.network.v2.router.Router(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'created_at': 'timestamp1', + 'distributed': False, + 'external_gateway_info': {'4': 4}, + 'flavor_id': '5', + 'ha': False, + 'revision': 7, + 'routes': ['8'], + 'status': '9', + 'tenant_id': '10', + 'updated_at': 'timestamp2', + + }) + + self.router_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_router = mock.MagicMock(return_value=router) + self.fake_client.remove_interface_from_router = \ + 
mock.MagicMock(return_value=None) + + response = self.router_instance.remove_interface({'subnet_id': 'abc'}) + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_security_group.py b/openstack_sdk/tests/network/test_security_group.py new file mode 100644 index 00000000..a62fa048 --- /dev/null +++ b/openstack_sdk/tests/network/test_security_group.py @@ -0,0 +1,173 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.security_group + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class SecurityGroupTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(SecurityGroupTestCase, self).setUp() + + self.fake_client =\ + self.generate_fake_openstack_connection('security_group') + + self.security_group_instance = networks.OpenstackSecurityGroup( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.security_group_instance.connection = self.connection + + def test_get_security_group(self): + sg = openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + }) + self.security_group_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_security_group = mock.MagicMock(return_value=sg) + + response = self.security_group_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_name') + + def test_list_security_groups(self): + sgs = [ + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_1', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + + }), + openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_name_2', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + + }) + ] + + self.fake_client.security_groups = mock.MagicMock(return_value=sgs) + response = self.security_group_instance.list() + self.assertEqual(len(response), 2) + + def test_create_security_group(self): + sg = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'distributed': False, + 'flavor_id': '5', + 'ha': False, + 'routes': ['8'], + 'tenant_id': '10', + } + + new_res = openstack.network.v2.security_group.SecurityGroup(**sg) + 
self.security_group_instance.config = sg + self.fake_client.create_security_group =\ + mock.MagicMock(return_value=new_res) + + response = self.security_group_instance.create() + self.assertEqual(response.name, sg['name']) + self.assertEqual(response.description, sg['description']) + + def test_update_security_group(self): + old_sg = openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + + }) + + new_config = { + 'name': 'test_name_update', + 'description': 'test_description_update', + } + + new_sg = openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_update', + 'description': 'test_description_update', + 'created_at': '2016-10-04T12:14:57.233772', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + + }) + + self.security_group_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_security_group =\ + mock.MagicMock(return_value=old_sg) + self.fake_client.update_security_group =\ + mock.MagicMock(return_value=new_sg) + + response = self.security_group_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_sg.name) + self.assertNotEqual(response.description, old_sg.description) + + def test_delete_security_group(self): + sg = openstack.network.v2.security_group.SecurityGroup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'created_at': '2016-10-04T12:14:57.233772', + 'description': '1', + 'revision_number': 3, + 'tenant_id': '4', + 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'] + + }) + + self.security_group_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_security_group = mock.MagicMock(return_value=sg) + self.fake_client.delete_security_group = \ + mock.MagicMock(return_value=None) + + response = self.security_group_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_security_group_rule.py b/openstack_sdk/tests/network/test_security_group_rule.py new file mode 100644 index 00000000..ab5cf31b --- /dev/null +++ b/openstack_sdk/tests/network/test_security_group_rule.py @@ -0,0 +1,163 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.security_group_rule + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class SecurityGroupRuleTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(SecurityGroupRuleTestCase, self).setUp() + self.fake_client =\ + self.generate_fake_openstack_connection('security_group_rule') + + self.security_group_rule_instance =\ + networks.OpenstackSecurityGroupRule( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.security_group_rule_instance.connection = self.connection + + def test_get_security_group_rule(self): + sg_rule =\ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'created_at': '0', + 'description': '1', + 'direction': '2', + 'ethertype': '3', + 'port_range_max': 4, + 'port_range_min': 5, + 'protocol': '6', + 'remote_group_id': '7', + 'remote_ip_prefix': '8', + 'revision_number': 9, + 'security_group_id': '10', + 'tenant_id': '11', + 'updated_at': '12' + }) + self.security_group_rule_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_security_group_rule =\ + mock.MagicMock(return_value=sg_rule) + + response = self.security_group_rule_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_name') + + def test_list_security_group_rules(self): + sgs = [ + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_1', + 'created_at': '0', + 'description': '1', + 'direction': '2', + 'ethertype': '3', + 'port_range_max': 4, + 'port_range_min': 5, + 'protocol': '6', + 'remote_group_id': '7', + 'remote_ip_prefix': '8', + 'revision_number': 9, + 'security_group_id': '10', + 'tenant_id': '11', + 'updated_at': '12' + }), + openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_name_2', + 'created_at': '0', + 'description': '1', + 'direction': '2', + 'ethertype': '3', + 'port_range_max': 4, + 'port_range_min': 5, + 'protocol': '6', + 'remote_group_id': '7', + 'remote_ip_prefix': '8', + 'revision_number': 9, + 'security_group_id': '10', + 'tenant_id': '11', + 'updated_at': '12' + }) + ] + + self.fake_client.security_group_rules =\ + mock.MagicMock(return_value=sgs) + response = self.security_group_rule_instance.list() + self.assertEqual(len(response), 2) + + def test_create_security_group_rule(self): + rule = { + 'id:': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'availability_zone_hints': ['1'], + 'availability_zones': ['2'], + 'distributed': False, + 'flavor_id': '5', + 'ha': False, + 'routes': ['8'], + 'tenant_id': '10', + } + + new_res =\ + openstack.network.v2.security_group_rule.SecurityGroupRule(**rule) + self.security_group_rule_instance.config = rule + self.fake_client.create_security_group_rule =\ + mock.MagicMock(return_value=new_res) + + response = self.security_group_rule_instance.create() + self.assertEqual(response.name, rule['name']) + self.assertEqual(response.description, rule['description']) + + def test_delete_security_group_rule(self): + sg = openstack.network.v2.security_group_rule.SecurityGroupRule(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 
'created_at': '0', + 'direction': '2', + 'ethertype': '3', + 'port_range_max': 4, + 'port_range_min': 5, + 'protocol': '6', + 'remote_group_id': '7', + 'remote_ip_prefix': '8', + 'revision_number': 9, + 'security_group_id': '10', + 'tenant_id': '11', + 'updated_at': '12' + + }) + + self.security_group_rule_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_security_group_rule =\ + mock.MagicMock(return_value=sg) + self.fake_client.delete_security_group_rule = \ + mock.MagicMock(return_value=None) + + response = self.security_group_rule_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/network/test_subnet.py b/openstack_sdk/tests/network/test_subnet.py new file mode 100644 index 00000000..9fb9e43b --- /dev/null +++ b/openstack_sdk/tests/network/test_subnet.py @@ -0,0 +1,251 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.network.v2.subnet + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import networks + + +class SubnetTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(SubnetTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('subnet') + + self.subnet_instance = networks.OpenstackSubnet( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.subnet_instance.connection = self.connection + + def test_get_subnet(self): + subnet = openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + self.subnet_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_subnet = mock.MagicMock(return_value=subnet) + response = self.subnet_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_name') + + def test_list_subnets(self): + subnets = [ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_1', + 'description': 'test_description_1', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': 
'18', + 'use_default_subnetpool': True, + }), + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_name_2', + 'description': 'test_description_2', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + ] + + self.fake_client.subnets = mock.MagicMock(return_value=subnets) + response = self.subnet_instance.list() + self.assertEqual(len(response), 2) + + def test_create_subnet(self): + subnet = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'use_default_subnetpool': True, + } + + new_res = openstack.network.v2.subnet.Subnet(**subnet) + self.subnet_instance.config = subnet + self.fake_client.create_subnet = mock.MagicMock(return_value=new_res) + + response = self.subnet_instance.create() + self.assertEqual(response.name, subnet['name']) + + def test_update_subnet(self): + old_subnet =\ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'description': 'test_description', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + new_config = { + 'name': 'test_name_update', + 'description': 'test_description_update', + 'enable_dhcp': False + } + + new_subnet =\ + openstack.network.v2.subnet.Subnet(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name_update', + 'description': 'test_description_update', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'dns_nameservers': ['5'], + 'enable_dhcp': False, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + self.subnet_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_subnet = mock.MagicMock(return_value=old_subnet) + self.fake_client.update_subnet =\ + mock.MagicMock(return_value=new_subnet) + + response = self.subnet_instance.update(new_config=new_config) + self.assertNotEqual(response.name, old_subnet.name) + self.assertNotEqual(response.description, old_subnet.description) + self.assertNotEqual(response.is_dhcp_enabled, + old_subnet.is_dhcp_enabled) + + def test_delete_subnet(self): + subnet = openstack.network.v2.subnet.Subnet(**{ + 'id': 
'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_name', + 'allocation_pools': [{'1': 1}], + 'cidr': '2', + 'created_at': '3', + 'description': '4', + 'dns_nameservers': ['5'], + 'enable_dhcp': True, + 'gateway_ip': '6', + 'host_routes': ['7'], + 'ip_version': 8, + 'ipv6_address_mode': '9', + 'ipv6_ra_mode': '10', + 'network_id': '12', + 'revision_number': 13, + 'segment_id': '14', + 'service_types': ['15'], + 'subnetpool_id': '16', + 'tenant_id': '17', + 'updated_at': '18', + 'use_default_subnetpool': True, + }) + + self.subnet_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_subnet = mock.MagicMock(return_value=subnet) + self.fake_client.delete_subnet = mock.MagicMock(return_value=None) + response = self.subnet_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/test_common.py b/openstack_sdk/tests/test_common.py new file mode 100644 index 00000000..de237474 --- /dev/null +++ b/openstack_sdk/tests/test_common.py @@ -0,0 +1,138 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import unittest +import mock + +# Third part imports +import openstack.compute.v2.server + +# Local imports +from openstack_sdk.resources import get_server_password +from openstack_sdk.common import OpenstackResource + + +@mock.patch('openstack.connect') +class OpenStackCommonBase(unittest.TestCase): + + def setUp(self): + super(OpenStackCommonBase, self).setUp() + + @mock.patch('openstack.proxy.Proxy') + def test_get_server(self, mock_proxy, _): + server = openstack.compute.v2.server.Server(**{ + 'id': 'a34b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_server', + 'access_ipv4': '1', + 'access_ipv6': '2', + 'addresses': {'region': '3'}, + 'config_drive': True, + 'created': '2015-03-09T12:14:57.233772', + 'flavor_id': '2', + 'image_id': '3', + 'availability_zone': 'test_availability_zone', + 'key_name': 'test_key_name', + + }) + server.get_password = mock.MagicMock(return_value='884bb559afe8') + mock_proxy._get_resource = mock.MagicMock(return_value=server) + password = get_server_password(mock_proxy, server) + self.assertEqual(password, '884bb559afe8') + + def test_openstack_resource_instance(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + + self.assertEqual(resource.resource_id, + 'a95b5509-c122-4c2f-823e-884bb559afe9') + self.assertEqual(resource.name, 'foo-name') + + def test_valid_resource_id(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + + self.assertIsNone(resource.validate_resource_identifier()) + + def test_invalid_resource_id(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'sad'} + ) + + 
self.assertIsNotNone(resource.validate_resource_identifier()) + + @mock.patch('openstack_sdk.common.OpenstackResource.get_quota_sets') + def test_get_quota_sets(self, mock_quota, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar', 'project_name': 'test'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + + mock_quota.return_value = 15 + self.assertEqual(resource.get_quota_sets('test'), 15) + + def test_resource_plural(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + + self.assertEqual(resource.resource_plural('test'), 'tests') + + def test_create(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + with self.assertRaises(NotImplementedError): + resource.create() + + def test_delete(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + with self.assertRaises(NotImplementedError): + resource.delete() + + def test_get(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + with self.assertRaises(NotImplementedError): + resource.get() + + def test_list(self, _): + resource = OpenstackResource( + client_config={'foo': 'foo', 'bar': 'bar'}, + resource_config={'name': 'foo-name', + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe9'} + ) + with self.assertRaises(NotImplementedError): + resource.list() diff --git a/openstack_sdk/tests/volume/__init__.py b/openstack_sdk/tests/volume/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/openstack_sdk/tests/volume/test_backup_volume.py b/openstack_sdk/tests/volume/test_backup_volume.py new file mode 100644 index 00000000..4ab11e81 --- /dev/null +++ b/openstack_sdk/tests/volume/test_backup_volume.py @@ -0,0 +1,126 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Standard imports +import mock + +# Third party imports +import openstack.block_storage.v2.backup + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import volume + + +class VolumeBackupTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(VolumeBackupTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('backup') + + self.volume_backup_instance = volume.OpenstackVolumeBackup( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.volume_backup_instance.connection = self.connection + + def test_get_backup(self): + volume_backup_instance = openstack.block_storage.v2.backup.Backup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_backup', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': 'available' + + }) + self.volume_backup_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_backup = \ + mock.MagicMock(return_value=volume_backup_instance) + + response = self.volume_backup_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_backup') + + def test_list_backups(self): + volume_backups = [ + openstack.block_storage.v2.backup.Backup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_backup_1', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': 'available' + }), + openstack.block_storage.v2.backup.Backup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_volume_backup_2', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': 'available' + }) + ] + + self.fake_client.backups = mock.MagicMock(return_value=volume_backups) + response = self.volume_backup_instance.list() + self.assertEqual(len(response), 2) + + def test_create_backup(self): + volume_backup = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_backup_1', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + } + new_res = openstack.block_storage.v2.backup.Backup(**volume_backup) + self.volume_backup_instance.config = volume_backup + self.fake_client.create_backup = mock.MagicMock(return_value=new_res) + + response = self.volume_backup_instance.create() + self.assertEqual(response.name, volume_backup['name']) + + def test_delete_backup(self): + volume_backup = openstack.block_storage.v2.backup.Backup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_backup_1', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + }) + + self.volume_backup_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_backup = mock.MagicMock( + return_value=volume_backup) + self.fake_client.delete_backup = mock.MagicMock(return_value=None) + + response = self.volume_backup_instance.delete() + self.assertIsNone(response) + + def test_restore_backup(self): + volume_backup = openstack.block_storage.v2.backup.Backup(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_backup_1', + 'description': 'volume_backup_description', + 'availability_zone': 'test_availability_zone', + 'status': 'available' + }) + + self.volume_backup_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + + self.fake_client.restore_backup = \ + 
mock.MagicMock(return_value=volume_backup) + + response = self.volume_backup_instance.restore( + '1', '2', 'test_volume_backup_1') + self.assertEqual(response.status, 'available') diff --git a/openstack_sdk/tests/volume/test_snpashot_volume.py b/openstack_sdk/tests/volume/test_snpashot_volume.py new file mode 100644 index 00000000..cfc03ba3 --- /dev/null +++ b/openstack_sdk/tests/volume/test_snpashot_volume.py @@ -0,0 +1,105 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.block_storage.v2.snapshot + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import volume + + +class VolumeSnapshotTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(VolumeSnapshotTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('snapshot') + + self.volume_snapshot_instance = volume.OpenstackVolumeSnapshot( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.volume_snapshot_instance.connection = self.connection + + def test_get_snapshot(self): + volume_snapshot_instance = \ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_snapshot', + 'description': 'volume_backup_description', + 'status': 'available' + }) + self.volume_snapshot_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_snapshot = \ + mock.MagicMock(return_value=volume_snapshot_instance) + + response = self.volume_snapshot_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_snapshot') + + def test_list_snapshots(self): + volume_snapshots = [ + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_snapshot_1', + 'description': 'volume_backup_description', + 'status': 'available' + }), + openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_snapshot_2', + 'description': 'volume_backup_description', + 'status': 'available' + }) + ] + + self.fake_client.snapshots = \ + mock.MagicMock(return_value=volume_snapshots) + response = self.volume_snapshot_instance.list() + self.assertEqual(len(response), 2) + + def test_create_snapshot(self): + volume_snapshot = { + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_snapshot_1', + 'description': 'volume_backup_description', + } + new_res = \ + openstack.block_storage.v2.snapshot.Snapshot(**volume_snapshot) + self.volume_snapshot_instance.config = volume_snapshot + self.fake_client.create_snapshot = mock.MagicMock(return_value=new_res) + + response = self.volume_snapshot_instance.create() + self.assertEqual(response.name, volume_snapshot['name']) + + def test_delete_snapshot(self): + volume_snapshot = openstack.block_storage.v2.snapshot.Snapshot(**{ + 'id': 
'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_snapshot_1', + 'description': 'volume_backup_description', + 'status': 'available' + }) + + self.volume_snapshot_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_snapshot = mock.MagicMock( + return_value=volume_snapshot) + self.fake_client.delete_snapshot = mock.MagicMock(return_value=None) + + response = self.volume_snapshot_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/volume/test_volume.py b/openstack_sdk/tests/volume/test_volume.py new file mode 100644 index 00000000..30a43b76 --- /dev/null +++ b/openstack_sdk/tests/volume/test_volume.py @@ -0,0 +1,108 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.block_storage.v2.volume + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import volume + + +class VolumeTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(VolumeTestCase, self).setUp() + self.fake_client = self.generate_fake_openstack_connection('volume') + + self.volume_instance = volume.OpenstackVolume( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.volume_instance.connection = self.connection + + def test_get_volume(self): + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume', + 'description': 'volume_description', + 'status': 'available' + }) + self.volume_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_volume = \ + mock.MagicMock(return_value=volume_instance) + + response = self.volume_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_volume') + + def test_list_volumes(self): + volumes = [ + openstack.block_storage.v2.volume.Volume(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_1', + 'description': 'volume_description_1', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': 'available' + }), + openstack.block_storage.v2.volume.Volume(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_volume_2', + 'description': 'volume_description_2', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': 'available' + }), + ] + + self.fake_client.volumes = mock.MagicMock(return_value=volumes) + response = self.volume_instance.list() + self.assertEqual(len(response), 2) + + def test_create_volume(self): + volume_instance = { + 'name': 'test_volume', + 'description': 'volume_description', + 'size': '12' + } + new_res = openstack.block_storage.v2.volume.Volume(**volume_instance) + self.volume_instance.config = volume_instance + self.fake_client.create_volume = mock.MagicMock(return_value=new_res) + + response = self.volume_instance.create() + 
self.assertEqual(response.name, volume_instance['name']) + + def test_delete_volume(self): + volume_instance = openstack.block_storage.v2.volume.Volume(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_1', + 'description': 'volume_description_1', + 'availability_zone': 'test_availability_zone', + 'is_bootable': False, + 'status': 'available' + }) + + self.volume_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_volume = mock.MagicMock( + return_value=volume_instance) + self.fake_client.delete_volume = mock.MagicMock(return_value=None) + + response = self.volume_instance.delete() + self.assertIsNone(response) diff --git a/openstack_sdk/tests/volume/test_volume_type.py b/openstack_sdk/tests/volume/test_volume_type.py new file mode 100644 index 00000000..4b1b1660 --- /dev/null +++ b/openstack_sdk/tests/volume/test_volume_type.py @@ -0,0 +1,108 @@ +# ####### +# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Standard imports +import mock + +# Third party imports +import openstack.block_storage.v2.type + +# Local imports +from openstack_sdk.tests import base +from openstack_sdk.resources import volume + + +class VolumeTypeTestCase(base.OpenStackSDKTestBase): + def setUp(self): + super(VolumeTypeTestCase, self).setUp() + self.fake_client = \ + self.generate_fake_openstack_connection('volume_type') + + self.volume_type_instance = volume.OpenstackVolumeType( + client_config=self.client_config, + logger=mock.MagicMock() + ) + self.volume_type_instance.connection = self.connection + + def test_get_volume_type(self): + volume_type = openstack.block_storage.v2.type.Type(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_type', + 'extra_specs': { + 'capabilities': 'gpu', + } + }) + self.volume_type_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_type = mock.MagicMock(return_value=volume_type) + + response = self.volume_type_instance.get() + self.assertEqual(response.id, 'a95b5509-c122-4c2f-823e-884bb559afe8') + self.assertEqual(response.name, 'test_volume_type') + + def test_list_volume_types(self): + volume_type_list = [ + openstack.block_storage.v2.type.Type(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_type_1', + 'extra_specs': { + 'capabilities': 'gpu', + } + + }), + openstack.block_storage.v2.type.Type(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe7', + 'name': 'test_volume_type_2', + 'extra_specs': { + 'capabilities': 'gpu', + } + }) + ] + + self.fake_client.types = mock.MagicMock(return_value=volume_type_list) + response = self.volume_type_instance.list() + self.assertEqual(len(response), 2) + + def test_create_volume_type(self): + volume_type = { + 'name': 'test_volume_type', + 'extra_specs': { + 'capabilities': 'gpu', + } + } + new_res = openstack.block_storage.v2.type.Type(**volume_type) + self.volume_type_instance.config = volume_type + self.fake_client.create_type = 
mock.MagicMock(return_value=new_res) + + response = self.volume_type_instance.create() + self.assertEqual(response.name, volume_type['name']) + + def test_delete_volume_type(self): + volume_type = openstack.block_storage.v2.type.Type(**{ + 'id': 'a95b5509-c122-4c2f-823e-884bb559afe8', + 'name': 'test_volume_type_1', + 'extra_specs': { + 'capabilities': 'gpu', + } + + }) + + self.volume_type_instance.resource_id = \ + 'a95b5509-c122-4c2f-823e-884bb559afe8' + self.fake_client.get_type = mock.MagicMock(return_value=volume_type) + self.fake_client.delete_type = mock.MagicMock(return_value=None) + + response = self.volume_type_instance.delete() + self.assertIsNone(response) diff --git a/plugin.yaml b/plugin.yaml index c47edf66..727cd20a 100644 --- a/plugin.yaml +++ b/plugin.yaml @@ -1,1973 +1,1413 @@ -################################################################################## -# Cloudify OpenStack built in types and plugins definitions. -################################################################################## - plugins: + openstack: executor: central_deployment_agent - source: https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/2.14.8.zip + source: https://github.com/cloudify-cosmo/cloudify-openstack-plugin/archive/3.0.0.zip package_name: cloudify-openstack-plugin - package_version: '2.14.8' + package_version: '3.0.0' + +dsl_definitions: + + client_config: &client_config + client_config: + type: cloudify.types.openstack.ClientConfig + description: Your OpenStack client configuration. + required: true + + # Every resource uses this property unless noted. + external_resource: &external_resource + use_external_resource: + description: Indicate whether the resource exists or if Cloudify should create the resource, true if you are bringing an existing resource, false if you want cloudify to create it. + type: boolean + default: false + + # Every resource uses this property unless noted. + resource_id: &resource_id + resource_id: + description: The Openstack resource ID of the external resource, if use_external_resource is true. Otherwise it is an empty string. + type: string + default: '' + + id: &data_type_id + id: + description: Resource id + type: string + required: false + + kwargs: &data_type_kwargs + kwargs: + description: A dictionary of keys and values that is not validated but will override other values in the resource config. + default: {} + + name: &data_type_name + name: + description: Resource name. + type: string + required: false + + description: &data_type_description + description: + description: Resource description. + type: string + required: false data_types: - cloudify.openstack.types.custom_configuration: - description: Custom configuration for OpenStack's API endpoints. + + cloudify.types.openstack.ClientConfig: + # See: https://docs.openstack.org/python-openstackclient/pike/cli/man/openstack.html. properties: - nova_client: - description: Parameters for Nova Client. - required: false - neutron_client: - description: Parameters for Neutron Client. - required: false - keystone_client: - description: Parameters for Keystone Client. + auth_url: + type: string + description: Your OpenStack Client Authorization URL. + required: true + username: + type: string + description: Your OpenStack username. + required: true + password: + type: string + description: Your OpenStack password. + required: true + project_name: + type: string + description: The name of your OpenStack project. 
required: false - cinder_client: - description: Parameters for Cinder Client. + region_name: + type: string + description: The region where you want this particular resource to be created. required: false - glance_client: - description: Parameters for Glance Client. + user_domain_id: + type: string + description: The user domain id required: false - cloudify.openstack.types.logging.groups: - description: > - For logging groups' configuration. Each level can be one of the following values: - critical/error/warning/info/debug/notset + cloudify.types.openstack.Network: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + external: + description: Whether this network is externally accessible. + type: boolean + default: false + admin_state_up: + description: The administrative state of the network, which is up (true) or down (false). + type: boolean + default: true + + # Note that the OpenStack API object specifies Subnets, a list of subnet. This data type refers to a member of that list. + cloudify.types.openstack.Subnet: properties: - nova: - description: Default logging level for Nova. + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + enable_dhcp: + description: Indicates whether dhcp is enabled or disabled for the subnet. Default is true. + type: boolean + required: false + network_id: + description: The ID of the network to which the subnet belongs. + type: string + required: false + dns_nameservers: + description: List of dns name servers associated with the subnet. Default is an empty list. + required: false + allocation_pools: + description: Allocation pools with start and end IP addresses for this subnet. If allocation_pools are not specified, OpenStack Networking automatically allocates pools for covering all IP addresses in the CIDR, excluding the address reserved for the subnet gateway by default. + required: false + host_routes: + description: Additional routes for the subnet. A list of dictionaries with destination and nexthop parameters. Default value is an empty list. + required: false + ip_version: + description: The IP protocol version. Value is 4 or 6. type: string required: false - neutron: - description: Default logging level for Neutron. + gateway_ip: + description: Gateway IP of this subnet. If the value is null that implies no gateway is associated with the subnet. If the gateway_ip is not specified, OpenStack Networking allocates an address from the CIDR for the gateway for the subnet by default. type: string required: false - cinder: - description: Default logging level for Cinder. + cidr: + description: The CIDR of the subnet. type: string required: false - keystone: - description: Default logging level for Keystone. + prefixlen: + description: The prefix length to use for subnet allocation from a subnet pool. If not specified, the default_prefixlen value of the subnet pool will be used. + type: integer + required: false + ipv6_address_mode: + description: The IPv6 address modes specifies mechanisms for assigning IP addresses. Value is slaac, dhcpv6-stateful, dhcpv6-stateless. type: string required: false - glance: - description: Default logging level for Glance. + ipv6_ra_mode: + description: The IPv6 router advertisement specifies whether the networking service should transmit ICMPv6 packets, for a subnet. Value is slaac, dhcpv6-stateful, dhcpv6-stateless. type: string required: false - cloudify.openstack.types.logging: - description: Logging configuration for OpenStack communication. 
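These data types describe what a node template supplies under its `client_config` and `resource_config` properties. The fragment below is a rough, hypothetical sketch of how the Network and Subnet types above might be consumed in a blueprint; the node type names follow the `cloudify.nodes.openstack.*` convention used by this plugin, and the `id` runtime property referenced with `get_attribute` is an assumption, not something defined in this patch.

```yaml
# Hypothetical blueprint fragment; property names mirror the data types above.
inputs:
  auth_url: { type: string }
  username: { type: string }
  password: { type: string }
  project_name: { type: string }
  region_name: { type: string }

node_templates:

  example-network:
    type: cloudify.nodes.openstack.Network
    properties:
      client_config:
        auth_url: { get_input: auth_url }
        username: { get_input: username }
        password: { get_input: password }
        project_name: { get_input: project_name }
        region_name: { get_input: region_name }
      resource_config:
        name: example-network

  example-subnet:
    type: cloudify.nodes.openstack.Subnet
    properties:
      client_config:
        auth_url: { get_input: auth_url }
        username: { get_input: username }
        password: { get_input: password }
        project_name: { get_input: project_name }
        region_name: { get_input: region_name }
      resource_config:
        name: example-subnet
        cidr: 10.10.0.0/24
        # Assumes the plugin stores the created network's ID in the
        # 'id' runtime property of the network node instance.
        network_id: { get_attribute: [ example-network, id ] }
    relationships:
      - type: cloudify.relationships.depends_on
        target: example-network
```

The generic `cloudify.relationships.depends_on` relationship is used here only to order creation; the plugin may also ship more specific relationship types for wiring a subnet to its network, which are not shown in this sketch.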
+ cloudify.types.openstack.Port: properties: - use_cfy_logger: - description: If true, a logging handler is added, which emits logs to the Cloudify logger. - type: boolean - default: true - groups: - description: Assigns logging level to logger groups. - type: cloudify.openstack.types.logging.groups + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + allowed_address_pairs: + description: A set of zero or more allowed address pairs, each of which contains an ip_address and a mac_address. While the ip_address is required, the mac_address will be taken from the port if not specified. The value of ip_address can be an IP address or a CIDR (if supported by the underlying extension plugin). A server connected to the port can send a packet with a source address that matches one of the specified allowed address pairs. + required: false + default: [] + device_id: + description: The ID of the device that uses this port. For example, a server instance or a logical router. + type: string + required: false + device_owner: + description: The entity type that uses this port. For example, compute:nova (server instance), network:dhcp (DHCP agent) or network:router_interface (router interface). + type: string + required: false + fixed_ips: + description: > + A list of IP addresses for the port. If you would like to assign multiple IP addresses for the port, specify multiple entries in this field. Each entry consists of an IP address (ip_address) and the subnet ID from which the IP address is assigned (subnet_id). + - If you specify both a subnet ID and an IP address, OpenStack Networking tries to allocate the IP address on that subnet to the port. + - If you specify only a subnet ID, OpenStack Networking allocates an available IP from that subnet to the port. + - If you specify only an IP address, OpenStack Networking tries to allocate the IP address if the address is a valid IP for any of the subnets on the specified network. + required: false + network_id: + description: The ID of the network to which the port belongs. + type: string required: false - loggers: - description: Assigns logging level to custom loggers (dictionary of string -> logging level). + security_groups: + description: A list of IDs of security groups applied to the port. required: false - # Note: as per CFY-7824, this custom type cannot be used as an argument - # to the "type:" field of operation inputs. + cloudify.types.openstack.SecurityGroup: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description - cloudify.openstack.types.configuration: - description: Configuration for OpenStack communication. + cloudify.types.openstack.Routes: properties: - username: - description: User to authenticate to KeyStone with. + <<: *data_type_name + <<: *data_type_id + host_routes: + description: > + Additional routes for the subnet. + A list of dictionaries with destination and nexthop parameters. + Default value is an empty list. + required: false + default: [] + + cloudify.types.openstack.Router: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + + cloudify.types.openstack.FloatingIP: + properties: + <<: *data_type_kwargs + <<: *data_type_description + <<: *data_type_id + floating_network_id: + description: The ID of the network associated with the floating IP. type: string required: false - password: - description: Password to authenticate to KeyStone with.
+ floating_network_name: + description: The Name of the network associated with the floating IP. type: string required: false - auth_url: - description: Keystone's URL (used for authentication). + fixed_ip_address: + description: The fixed IP address that is associated with the floating IP. If an internal port has multiple associated IP addresses, the service chooses the first IP address unless you explicitly define a fixed IP address in the fixed_ip_address parameter. type: string required: false - tenant_name: - description: Name of tenant. + floating_ip_address: + description: The floating IP address. type: string required: false - project_id: - description: ID of project to operate on. + port_id: + description: The ID of a port associated with the floating IP. To associate the floating IP with a fixed IP at creation time, you must specify the identifier of the internal port. type: string required: false - project_name: - description: Name of project to operate on. + subnet_id: + description: The subnet ID on which you want to create the floating IP. + type: string + required: false + dns_domain: + description: A valid DNS domain. type: string required: false - user_domain_name: - description: Domain name to operate on. + dns_name: + description: A valid DNS name. type: string required: false - project_domain_name: - description: Project domain name to operate on. + + cloudify.types.openstack.SecurityGroupRule: + properties: + <<: *data_type_name + <<: *data_type_kwargs + <<: *data_type_id + remote_group_id: + description: The remote group UUID to associate with this security group rule. You can specify either the remote_group_id or remote_ip_prefix attribute in the request body. type: string required: false - region: - description: The region's name (optional if only one region exists). + protocol: + description: The IP protocol of the security group rule. type: string required: false - insecure: - description: If true, SSL validation is skipped. - type: boolean + direction: + description: Ingress or egress, which is the direction in which the security group rule is applied. + type: string required: false - default: false - ca_cert: - description: Path to CA certificate to validate OpenStack's endpoint with. + port_range_min: + description: The minimum port number in the range that is matched by the security group rule. If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must be less than or equal to the port_range_max attribute value. If the protocol is ICMP, this value must be an ICMP type. + type: integer + required: false + port_range_max: + description: The maximum port number in the range that is matched by the security group rule. If the protocol is TCP, UDP, DCCP, SCTP or UDP-Lite this value must be greater than or equal to the port_range_min attribute value. If the protocol is ICMP, this value must be an ICMP type. + type: integer + required: false + security_group_id: + description: The security group ID to associate with this security group rule. type: string required: false - nova_url: - description: DEPRECATED - use 'custom_configuration' and 'bypass_url' instead. + remote_ip_prefix: + description: The remote IP prefix that is matched by this security group rule. type: string required: false - neutron_url: - description: DEPRECATED - use 'custom_configuration' and 'endpoint_url' instead. + + cloudify.types.openstack.RBACPolicy: + properties: + <<: *data_type_id + target_tenant: + description: The ID of the tenant to which the RBAC policy will be enforced. 
type: string required: false - custom_configuration: - description: Custom configuration for OpenStack's API endpoints. - type: cloudify.openstack.types.custom_configuration + object_type: + description: > + The type of the object that the RBAC policy affects. + Types include qos-policy or network. + type: string required: false - logging: - description: Logging configuration. - type: cloudify.openstack.types.logging + object_id: + description: > + The ID of the object_type resource. An object_type of + network returns a network ID and an object_type of qos-policy returns a QoS ID. + type: string + required: false + action: + description: > + Action for the RBAC policy which is access_as_external or access_as_shared. + type: string required: false -node_types: - - cloudify.openstack.nodes.Server: - derived_from: cloudify.nodes.Compute + cloudify.types.openstack.Server: properties: - server: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + image_id: + description: > + The UUID of the image to use for your server instance. + This is not required in case of boot from volume. + In all other cases it is required + and must be a valid UUID otherwise API will return 400 + required: false + type: string + flavor_id: + description: > + The flavor reference, as an ID (including a UUID) or + full URL, for the flavor for your server instance. + required: false + type: string + availability_zone: + description: > + The availability zone from which to launch the server. + When you provision resources, you specify from which availability + zone you want your instance to be built. Typically, + an admin user will use availability zones to arrange + OpenStack compute hosts into logical groups. + An availability zone provides a form of physical isolation + and redundancy from other availability zones. + For instance, if some racks in your data center are + on a separate power source, you can put servers in those racks + in their own availability zone. Availability zones can + also help separate different classes of hardware. + By segregating resources into availability zones, + you can ensure that your application resources are spread across + disparate machines to achieve high availability + in the event of hardware or other failure. + You can list the available availability zones + by calling the os-availability-zone API, + but you should avoid using the default availability zone when + booting the instance. In general, + the default availability zone is named nova. + This AZ is only shown when listing the availability zones as an admin + required: false + type: string + user_data: + description: > + Configuration information or scripts to use upon launch. + Must be Base64 encoded. Restricted to 65535 bytes. + required: false + type: string + metadata: + description: > + Metadata key and value pairs. + The maximum size of the metadata key and value is 255 bytes each. + required: false default: {} + security_groups: + description: > + One or more security groups. + Specify the name of the security group in the name attribute. + If you omit this attribute, the API creates the server + in the default security group. Requested security groups are not + applied to pre-existing ports + required: false + default: [] + networks: + description: > + A list of network object. + Required parameter when there are multiple networks + defined for the tenant. When you do not specify the networks parameter, + the server attaches to the only network created for the current tenant. 
+ required: false + default: [] + key_name: + description: > + Key pair name. + required: false + type: string + + cloudify.types.openstack.HostAggregate: + properties: + <<: *data_type_name + availability_zone: + required: false + type: string + description: The availability zone of the host aggregate. + + cloudify.types.openstack.ServerGroup: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + policies: + description: > + A list of exactly one policy name to associate with the server group. The current valid policy names are: + anti-affinity - servers in this group must be scheduled to different hosts. + affinity - servers in this group must be scheduled to the same host. + soft-anti-affinity - servers in this group should be scheduled to different hosts if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. This policy was added in microversion 2.15. + soft-affinity - servers in this group should be scheduled to the same host if possible, but if not possible then they should still be scheduled instead of resulting in a build failure. This policy was added in microversion 2.15. + required: false + + cloudify.types.openstack.KeyPair: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + public_key: + description: > + The public SSH key to import. If you omit this value, + a keypair is generated for you. + required: false + type: string + + cloudify.types.openstack.Image: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + container_format: + description: > + Format of the image container. + required: false + type: string + disk_format: + description: > + The format of the disk. + required: false + type: string + tags: + description: > + List of tags for this image. Each tag is a string of at most 255 characters. + The maximum number of tags allowed on an image is set by the operator. + required: false + default: [] + + cloudify.types.openstack.Flavor: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description + ram: + description: > + The amount of RAM a flavor has, in MiB. + required: false + type: integer + disk: + description: > + The size of the root disk that will be created, in GiB. + If 0, the root disk will be set to exactly the size of the image used + to deploy the instance. However, in this case the filter scheduler cannot + select the compute host based on the virtual image size. + Therefore, 0 should only be used for volume-booted instances or for testing purposes. + required: false + type: integer + vcpus: + description: > + The number of virtual CPUs that will be allocated to the server. + required: false + type: integer + + cloudify.types.openstack.User: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description + default_project_id: + description: > + The ID of the default project for the user. + A user's default project must not be a domain. + Setting this attribute does not grant any actual authorization + on the project, and is merely provided for convenience. + Therefore, the referenced project does not need to exist within the user domain. Since v3.1. + If the user does not have authorization to their default project, + the default project is ignored at token creation. Since v3.1.
+ Additionally, if your default project is not valid, a token is issued without an explicit scope of authorization. + required: false + type: string + domain_id: + description: > + The ID of the domain of the user. If the domain ID is not provided + in the request, the Identity service will attempt to pull the + domain ID from the token used in the request. Note that this requires the use of a domain-scoped token. + required: false + type: string + enabled: description: > - The keypair object as described in OpenStack compute API (create server method): - https://developer.openstack.org/api-ref/compute/#servers-servers - use_external_resource: + If the user is enabled, this value is true. If the user is disabled, this value is false. + required: false type: boolean - default: false + password: + description: > + The password for the user. + required: false + type: string + email: description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: + The email address for the user. + required: false + type: string + + cloudify.types.openstack.Project: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description + is_domain: + description: > + Indicates whether the project also acts as a domain. + If set to true, this project acts as both a project and a domain. + required: false type: boolean - default: false + domain_id: description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: + The ID of the domain for the project. + For projects acting as a domain, the domain_id must not be specified; it will be generated by the Identity service implementation. + required: false type: string - default: '' + parent_id: description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - image: + The ID of the parent project. + required: false type: string - default: '' + tags: + description: > + A list of simple strings assigned to a project. Tags can be used to classify projects into groups. + required: false + default: [] + + cloudify.types.openstack.Volume: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description + project_id: description: > - The image for the server. - May receive either the ID or the name of the image. - note: This property is currently optional for backwards compatibility, - but will be modified to become a required property in future versions - (Default: ''). - flavor: + The UUID of the project in a multi-tenancy cloud. + required: false type: string - default: '' + size: + description: > + The size of the volume, in gibibytes (GiB). + required: false + type: integer + availability_zone: description: > - The flavor for the server. - May receive either the ID or the name of the flavor. - note: This property is currently optional for backwards compatibility, - but will be modified to become a required property in future versions - (Default: ''). - management_network_name: + The name of the availability zone. + required: false type: string + imageRef: + description: > + The UUID of the image from which you want to create the volume. + Required to create a bootable volume.
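A minimal, hypothetical set of values for the volume keys just described, creating a 20 GiB bootable volume from an image; the image_id input is a placeholder, and the remaining keys (snapshot_id, volume_type) continue below.

```yaml
# Illustrative only: a bootable volume built from an image.
resource_config:
  name: example-boot-volume
  size: 20                              # GiB
  availability_zone: nova
  imageRef: { get_input: image_id }     # required for a bootable volume
```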
required: false + type: string + snapshot_id: description: > - Old requirement called management network name. Not required. Leave blank. Retained for backward compatibility. - Not every server need be connected to a management network. - If the management network's name information is available in the Provider Context, - this connection is made automatically and there's no need to override this property - (See the Misc section for more information on the Openstack Provider Context). - It is not required to set the value of this property to the management network name. As of Cloudify 4.0, it has no relationship to the bootstrap process. - use_password: - type: boolean - default: false + To create a volume from an existing snapshot, + specify the UUID of the volume snapshot. + The volume is created in same availability zone and with same size as the snapshot. + required: false + type: string + volume_type: + description: > + The volume type (either name or ID). + To create an environment with multiple-storage back ends, you + must specify a volume type. Block Storage volume back ends are + spawned as children to cinder- volume, and they are keyed from a + unique queue. They are named cinder- volume.HOST.BACKEND. For + example, cinder- volume.ubuntu.lvmdriver. When a volume is + created, the scheduler chooses an appropriate back end to handle + the request based on the volume type. Default is None. For + information about how to use volume types to create multiple- + storage back ends, + required: false + type: string + + cloudify.types.openstack.VolumeType: + properties: + <<: *data_type_kwargs + <<: *data_type_name + <<: *data_type_id + <<: *data_type_description + project_id: description: > - A boolean describing whether this server image supports user-password authentication. - Images that do should post the administrator user's password to the Openstack metadata service (e.g. via cloudbase); - The password would then be retrieved by the plugin, - decrypted using the server's keypair and then saved in the server's runtime properties. - openstack_config: - type: cloudify.openstack.types.configuration - default: {} + The UUID of the project in a multi-tenancy cloud.. + required: false + type: string + extra_specs: description: > - see Openstack Configuraion + A key and value pair that contains additional specifications + that are associated with the volume type. + Examples include capabilities, capacity, compression, + and so on, depending on the storage driver in use. + required: false + default: {} + + +node_types: + + cloudify.nodes.openstack.NetworkBase: + derived_from: cloudify.nodes.Network + properties: + <<: *external_resource + <<: *client_config + + cloudify.nodes.openstack.Network: + derived_from: cloudify.nodes.openstack.NetworkBase + properties: + resource_config: + type: cloudify.types.openstack.Network + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-network. 
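Since this is the first of the new node types, a short blueprint sketch may help show how client_config and resource_config are expected to be wired together. It assumes the usual tosca_definitions_version and plugin import lines, that the *client_config anchor exposes a client_config property taking the standard openstacksdk authentication keys, and that the input names are placeholders.

```yaml
dsl_definitions:
  client_config: &client_config
    auth_url: { get_input: auth_url }
    username: { get_input: username }
    password: { get_input: password }
    project_name: { get_input: project_name }
    region_name: { get_input: region_name }

node_templates:
  example_network:
    type: cloudify.nodes.openstack.Network
    properties:
      client_config: *client_config
      resource_config:
        name: example-network
```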
+ required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.server.create + implementation: openstack.openstack_plugin.resources.network.network.create + delete: + implementation: openstack.openstack_plugin.resources.network.network.delete + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.network.network.creation_validation + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.network.network.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - start: - implementation: openstack.nova_plugin.server.start - inputs: - start_retry_interval: - description: Polling interval until the server is active in seconds - type: integer - default: 30 - private_key_path: - description: > - Path to private key which matches the server's - public key. Will be used to decrypt password in case - the "use_password" property is set to "true" - type: string - default: '' - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - stop: - implementation: openstack.nova_plugin.server.stop + list: + implementation: openstack.openstack_plugin.resources.network.network.list_networks inputs: - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + + cloudify.nodes.openstack.Subnet: + derived_from: cloudify.nodes.openstack.NetworkBase + properties: + resource_config: + type: cloudify.types.openstack.Subnet + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-subnet. This is not a list of cloudify.types.openstack.Subnet as the OpenStack API specifies. + required: true + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.openstack_plugin.resources.network.subnet.create delete: - implementation: openstack.nova_plugin.server.delete + implementation: openstack.openstack_plugin.resources.network.subnet.delete + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.network.subnet.creation_validation + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.network.subnet.update inputs: - openstack_config: + args: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - reboot: - implementation: openstack.nova_plugin.server.reboot + list: + implementation: openstack.openstack_plugin.resources.network.subnet.list_subnets inputs: - reboot_type: - type: string - default: soft - # suspend/resume - cloudify.interfaces.freeze: - suspend: - implementation: openstack.nova_plugin.server.freeze_suspend - inputs: {} - resume: - implementation: openstack.nova_plugin.server.freeze_resume - inputs: {} - # backups related section - cloudify.interfaces.snapshot: + query: + default: {} + + cloudify.nodes.openstack.Port: + derived_from: cloudify.nodes.Port + properties: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.Port + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-port. 
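Continuing the sketch above, a hypothetical subnet attached to that network. The cloudify.types.openstack.Subnet keys are not shown in this hunk, so the values below simply follow the create-subnet API the description links to, and the generic cloudify.relationships.depends_on stands in for whatever plugin-specific relationship is intended.

```yaml
  example_subnet:
    type: cloudify.nodes.openstack.Subnet
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      resource_config:
        name: example-subnet
        cidr: 10.10.0.0/24
        ip_version: 4
    relationships:
      - type: cloudify.relationships.depends_on
        target: example_network
```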
+ required: true + interfaces: + cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.server.snapshot_create - inputs: {} - apply: - implementation: openstack.nova_plugin.server.snapshot_apply - inputs: {} + implementation: openstack.openstack_plugin.resources.network.port.create delete: - implementation: openstack.nova_plugin.server.snapshot_delete - inputs: {} + implementation: openstack.openstack_plugin.resources.network.port.delete cloudify.interfaces.validation: creation: - implementation: openstack.nova_plugin.server.creation_validation + implementation: openstack.openstack_plugin.resources.network.port.creation_validation + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.network.port.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: list: - implementation: openstack.nova_plugin.server.list_servers + implementation: openstack.openstack_plugin.resources.network.port.list_ports inputs: - args: + query: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nodes.WindowsServer: - derived_from: cloudify.openstack.nodes.Server - properties: - use_password: - type: boolean - default: true - description: > - Default changed for derived type - because Windows instances need a password for agent installation - os_family: - type: string - default: windows - description: > - (updates the os_family default as a convenience) - agent_config: - type: cloudify.datatypes.AgentConfig - default: - port: 5985 - description: > - (updates the defaults for the agent_config for Windows) - cloudify.openstack.nodes.KeyPair: - derived_from: cloudify.nodes.Root + cloudify.nodes.openstack.Router: + derived_from: cloudify.nodes.Router properties: - keypair: - default: {} - description: > - The keypair object as described in Openstack compute API (create keypair method): - https://developer.openstack.org/api-ref/compute/#keypairs-keypairs. - This parameter can be used to override and pass parameters directly to Nova client. - Note that in the case of keypair, the only nested parameter that can be used is "name". - private_key_path: - type: string - description: > - the path (on the machine the plugin is running on) to - where the private key should be stored. If - use_external_resource is set to "true", the existing - private key is expected to be at this path. - use_external_resource: - type: boolean - default: false - description: > - a boolean describing whether this resource should be - created or rather that it already exists on Openstack - and should be used as-is. - create_if_missing: - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - the name that will be given to the resource on Openstack (excluding optional prefix). - If not provided, a default name will be given instead. - If use_external_resource is set to "true", this exact - value (without any prefixes applied) will be looked for - as either the name or id of an existing keypair to be used. - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. 
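In the same spirit, a hypothetical port on that subnet. The cloudify.types.openstack.Port keys are likewise not shown here, so fixed_ips and allowed_address_pairs below are taken from the create-port API the description references; all addresses are placeholders.

```yaml
  example_port:
    type: cloudify.nodes.openstack.Port
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      resource_config:
        name: example-port
        fixed_ips:
          - ip_address: 10.10.0.10
        allowed_address_pairs:
          - ip_address: 10.10.0.100
    relationships:
      - type: cloudify.relationships.depends_on
        target: example_subnet
```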
- Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.Router + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-router. + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.keypair.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.router.create + start: + implementation: openstack.openstack_plugin.resources.network.router.start + stop: + implementation: openstack.openstack_plugin.resources.network.router.stop delete: - implementation: openstack.nova_plugin.keypair.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.router.delete cloudify.interfaces.validation: creation: - implementation: openstack.nova_plugin.keypair.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.router.creation_validation cloudify.interfaces.operations: - list: - implementation: openstack.nova_plugin.keypair.list_keypairs + update: + implementation: openstack.openstack_plugin.resources.network.router.update inputs: args: default: {} - openstack_config: + list: + implementation: openstack.openstack_plugin.resources.network.router.list_routers + inputs: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.Subnet: - derived_from: cloudify.nodes.Subnet + cloudify.nodes.openstack.FloatingIP: + derived_from: cloudify.nodes.VirtualIP properties: - subnet: - default: {} - description: > - The subnet object as described in Openstack networking API (create subnet method): - https://developer.openstack.org/api-ref/network/v2/index.html#subnets. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: + <<: *external_resource + <<: *client_config + allow_reallocation: type: boolean default: false description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + (Applicable only when use_external_resource is true) + If true, then allow using this floating IP even if it has already been allocated to + another instance. If false, and the floating IP is already allocated (that is, it is + in 'ACTIVE' state), a recoverable error is raised. 
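To illustrate allow_reallocation together with an externally managed resource, here is a hypothetical template that adopts an existing floating IP; it assumes the *external_resource anchor exposes the use_external_resource flag referenced in the description, and the UUID is a placeholder.

```yaml
  existing_floating_ip:
    type: cloudify.nodes.openstack.FloatingIP
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      use_external_resource: true     # assumed to come from the *external_resource anchor
      allow_reallocation: true        # reuse the IP even if it is already ACTIVE
      resource_config:
        id: 00000000-0000-0000-0000-000000000000   # placeholder UUID of the existing floating IP
```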
+ resource_config: + type: cloudify.types.openstack.FloatingIP + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-floating-ip. + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.neutron_plugin.subnet.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.floating_ip.create delete: - implementation: openstack.neutron_plugin.subnet.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.floating_ip.delete cloudify.interfaces.validation: creation: - implementation: openstack.neutron_plugin.subnet.creation_validation + implementation: openstack.openstack_plugin.resources.network.floating_ip.creation_validation + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.network.floating_ip.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: list: - implementation: openstack.neutron_plugin.subnet.list_subnets + implementation: openstack.openstack_plugin.resources.network.floating_ip.list_floating_ips inputs: - args: - default: {} - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.SecurityGroup: + cloudify.nodes.openstack.SecurityGroup: derived_from: cloudify.nodes.SecurityGroup properties: - security_group: - default: {} - description: > - The security-group object as described in Openstack networking API (create security-group method): - https://developer.openstack.org/api-ref/network/v2/index.html#security-groups-security-groups. - description: - type: string - default: '' - description: > - SecurityGroup description. - create_if_missing: - type: boolean - default: false + <<: *external_resource + <<: *client_config + disable_default_egress_rules: description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - use_external_resource: + a flag for removing the default rules. If not set to `true`, + these rules will remain, and exist alongside any additional rules passed using the `rules` property. type: boolean default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. - rules: + security_group_rules: + description: List of security groups rule. + required: false default: [] - description: > - key-value security_group_rule configuration as described in: - https://developer.openstack.org/api-ref/network/v2/index.html#security-group-rules-security-group-rules. 
- disable_default_egress_rules: - type: boolean - default: false - description: > - a flag for removing the default rules which https://wiki.openstack.org/wiki/Neutron/SecurityGroups#Behavior. If not set to `true`, these rules will remain, and exist alongside any additional rules passed using the `rules` property. + resource_config: + type: cloudify.types.openstack.SecurityGroup + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-security-group. + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.neutron_plugin.security_group.create + implementation: openstack.openstack_plugin.resources.network.security_group.create + configure: + implementation: openstack.openstack_plugin.resources.network.security_group.configure + inputs: + security_group_rules: + default: { get_property: [ SELF, security_group_rules ] } + delete: + implementation: openstack.openstack_plugin.resources.network.security_group.delete + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.network.security_group.creation_validation + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.network.security_group.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.neutron_plugin.security_group.delete + list: + implementation: openstack.openstack_plugin.resources.network.security_group.list_security_groups inputs: - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + + cloudify.nodes.openstack.SecurityGroupRule: + derived_from: cloudify.nodes.SecurityGroup + properties: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.SecurityGroupRule + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/#create-security-group-rule + required: true + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.openstack_plugin.resources.network.security_group_rule.create + delete: + implementation: openstack.openstack_plugin.resources.network.security_group_rule.delete cloudify.interfaces.validation: creation: - implementation: openstack.neutron_plugin.security_group.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.security_group_rule.creation_validation cloudify.interfaces.operations: list: - implementation: openstack.neutron_plugin.security_group.list_security_groups + implementation: openstack.openstack_plugin.resources.network.security_group_rule.list_security_group_rules inputs: - args: - default: {} - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.Router: - derived_from: cloudify.nodes.Router + cloudify.nodes.openstack.RBACPolicy: + derived_from: cloudify.nodes.Root properties: - router: - default: {} - description: > - The router object as described in Openstack networking API (create router method): - https://developer.openstack.org/api-ref/network/v2/index.html#routers-routers. 
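Because the configure operation above feeds security_group_rules straight from the node properties, a sketch of a group that opens SSH may be useful; the rule keys mirror the cloudify.types.openstack.SecurityGroupRule fields defined earlier, and all values are illustrative.

```yaml
  example_security_group:
    type: cloudify.nodes.openstack.SecurityGroup
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      resource_config:
        name: example-security-group
      security_group_rules:
        - direction: ingress
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
          remote_ip_prefix: 0.0.0.0/0
```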
- external_network: - type: string - default: '' - description: > - An external network name or ID. - If given, the router will use this external network as a gateway. - default_to_managers_external_network: - type: boolean - default: true - description: > - A boolean which determines whether to use the Cloudify Manager's external network if no other external network was given (whether by a relationship, by the `external_network` property or by the nested `external_gateway_info` key in the `router` property). This is only relevant if the manager's external network appears in the Provider-context. Defaults to `true`. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.RBACPolicy + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/network/v2/?expanded=create-rbac-policy-detail#create-rbac-policy + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.neutron_plugin.router.create + implementation: openstack.openstack_plugin.resources.network.rbac_policy.create inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } delete: - implementation: openstack.neutron_plugin.router.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.rbac_policy.delete cloudify.interfaces.validation: creation: - implementation: openstack.neutron_plugin.router.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.network.rbac_policy.creation_validation cloudify.interfaces.operations: update: - implementation: openstack.neutron_plugin.router.update + implementation: openstack.openstack_plugin.resources.network.rbac_policy.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } list: - implementation: openstack.neutron_plugin.router.list_routers + implementation: openstack.openstack_plugin.resources.network.rbac_policy.list_rbac_policies inputs: - args: + query: default: {} - openstack_config: + find_and_delete: + implementation: openstack.openstack_plugin.resources.network.rbac_policy.find_and_delete + inputs: + args: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.Routes: - 
derived_from: cloudify.nodes.Root + cloudify.nodes.openstack.Server: + derived_from: cloudify.nodes.Compute properties: - routes: - default: [] - description: > - The extra routes configuration for L3 router. - A list of dictionaries with destination and nexthop parameters. - It is available when extraroute extension is enabled. - Default is an empty list ([]). - https://developer.openstack.org/api-ref/network/v2/index.html#update-router - use_external_resource: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.Server + description: A dictionary that may contain these keys https://developer.openstack.org/api-ref/compute/?expanded=create-server-detail + required: true + use_ipv6_ip: type: boolean - default: false description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean + Tells us to use the IPv6 IP if one exists for agent installation. If use_public_ip is provided, this is overridden. default: false + use_public_ip: + type: boolean description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + Tells the deployment to use the public IP (if available) of the resource + for Cloudify Agent connections + default: false interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.neutron_plugin.router.add_routes + implementation: openstack.openstack_plugin.resources.compute.server.create + configure: + implementation: openstack.openstack_plugin.resources.compute.server.configure + stop: + implementation: openstack.openstack_plugin.resources.compute.server.stop + delete: + implementation: openstack.openstack_plugin.resources.compute.server.delete + reboot: + implementation: openstack.openstack_plugin.resources.compute.server.reboot + inputs: + reboot_type: + type: string + default: soft + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.compute.server.creation_validation + cloudify.interfaces.freeze: + suspend: + implementation: openstack.openstack_plugin.resources.compute.server.suspend + resume: + implementation: openstack.openstack_plugin.resources.compute.server.resume + cloudify.interfaces.snapshot: + create: + implementation: openstack.openstack_plugin.resources.compute.server.snapshot_create + inputs: {} + apply: + implementation: openstack.openstack_plugin.resources.compute.server.snapshot_apply + inputs: {} + delete: + implementation: openstack.openstack_plugin.resources.compute.server.snapshot_delete + inputs: {} + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.compute.server.update inputs: args: default: {} - openstack_config: + list: + implementation: openstack.openstack_plugin.resources.compute.server.list_servers + inputs: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - 
delete: - implementation: openstack.neutron_plugin.router.delete_routes + all_projects: + default: False + details: + default: True - cloudify.openstack.nodes.Port: - derived_from: cloudify.nodes.Port + cloudify.nodes.openstack.WindowsServer: + derived_from: cloudify.nodes.openstack.Server properties: - port: - default: {} - description: > - The port object as described in Openstack networking API (create port method): - https://developer.openstack.org/api-ref-networking-v2.html#ports. - fixed_ip: - type: string - default: '' - description: > - may be used to request a specific fixed IP for the port. - If the IP is unavailable - (either already taken or does not belong to a subnet the port is on) - an error will be raised. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: + use_password: type: boolean - default: false + default: true description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: + Default changed for derived type + because Windows instances need a password for agent installation + os_family: type: string - default: '' + default: windows description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} + updates the os_family default as a convenience + agent_config: + type: cloudify.datatypes.AgentConfig + default: + port: 5985 description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
+ updates the defaults for the agent_config for Windows) + + + + cloudify.nodes.openstack.ServerGroup: + derived_from: cloudify.nodes.Root + properties: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.ServerGroup + description: https://developer.openstack.org/api-ref/compute/?expanded=create-server-detail#create-server-group + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.neutron_plugin.port.create + implementation: openstack.openstack_plugin.resources.compute.server_group.create + delete: + implementation: openstack.openstack_plugin.resources.compute.server_group.delete + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.compute.server_group.creation_validation + cloudify.interfaces.operations: + list: + implementation: openstack.openstack_plugin.resources.compute.server_group.list_server_groups inputs: - args: + query: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.neutron_plugin.port.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.neutron_plugin.port.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: - list: - implementation: openstack.neutron_plugin.port.list_ports - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nodes.Network: - derived_from: cloudify.nodes.Network - properties: - network: - default: {} - description: > - The network object as described in Openstack networking API (create network method): - https://developer.openstack.org/api-ref-networking-v2.html#networks. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
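Pulling the compute pieces together, a hypothetical server and the anti-affinity server group defined above. The relationship types that would bind a server to its port, key pair, or server group are not part of this hunk, so the generic cloudify.relationships.depends_on is used as a stand-in, and the agent is disabled to keep the sketch self-contained.

```yaml
  example_server_group:
    type: cloudify.nodes.openstack.ServerGroup
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      resource_config:
        name: example-server-group
        policies:
          - anti-affinity

  example_server:
    type: cloudify.nodes.openstack.Server
    properties:
      client_config: *client_config
      use_public_ip: false
      agent_config:
        install_method: none          # keep the sketch agentless
      resource_config:
        name: example-server
        image_id: { get_input: image_id }
        flavor_id: { get_input: flavor_id }
        key_name: example-keypair     # name of a key pair managed elsewhere in the blueprint
    relationships:
      - type: cloudify.relationships.depends_on
        target: example_port
      - type: cloudify.relationships.depends_on
        target: example_server_group
```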
- interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.neutron_plugin.network.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.neutron_plugin.network.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.neutron_plugin.network.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: - list: - implementation: openstack.neutron_plugin.network.list_networks - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nodes.FloatingIP: - derived_from: cloudify.nodes.VirtualIP - properties: - floatingip: - default: {} - description: > - The floatingip object as described in Openstack networking API (create floatingip method): - https://developer.openstack.org/api-ref/network/v2/index.html#floating-ips-floatingips - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - allow_reallocation: - type: boolean - default: false - description: > - (Applicable only when use_external_resource is true) - If true, then allow using this floating IP even if it has already been allocated to - another instance. If false, and the floating IP is already allocated (that is, it is - in 'ACTIVE' state), a recoverable error is raised. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - description: IP address of the floating IP - default: '' - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
- interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.neutron_plugin.floatingip.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.neutron_plugin.floatingip.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.neutron_plugin.floatingip.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: - list: - implementation: openstack.neutron_plugin.floatingip.list_floatingips - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.RBACPolicy: + cloudify.nodes.openstack.KeyPair: derived_from: cloudify.nodes.Root properties: - rbac_policy: - default: {} - description: > - The RBAC policy object as described in Openstack networking API (create RBAC policy method): - https://developer.openstack.org/api-ref/network/v2/#create-rbac-policy - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
- interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.neutron_plugin.rbac_policy.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.neutron_plugin.rbac_policy.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.neutron_plugin.rbac_policy.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: - list: - implementation: openstack.neutron_plugin.rbac_policy.list_rbac_policies - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - find_and_delete: - implementation: openstack.neutron_plugin.rbac_policy.find_and_delete - inputs: - args: - default: {} - openstack_config: - default: {} - - cloudify.openstack.nodes.Volume: - derived_from: cloudify.nodes.Volume - properties: - volume: - default: {} - description: > - The volume object as described in Openstack block-storage API (create volume method): - https://developer.openstack.org/api-ref/block-storage/v2/index.html#volumes-volumes. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - device_name: - type: string - default: auto - description: > - The device name this volume will be attached to. - Default value is *auto*, - which means openstack will auto-assign a device. - Note that if you do explicitly set a value, - this value may not be the actual device name assigned. - Sometimes the device requested will not be available and openstack will assign it to a different device, - this is why we recommend using *auto*. - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. - boot: - type: boolean - required: false - description: > - If a Server instance is connected to this Volume by a relationship, - this volume will be used as the boot volume for that Server. - This option will be deprecated in the future. 
- interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.cinder_plugin.volume.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - status_attempts: - description: > - Number of times to check for the creation's status before failing - type: integer - default: 20 - status_timeout: - description: > - Interval (in seconds) between subsequent inquiries of the creation's - status - type: integer - default: 15 - delete: - implementation: openstack.cinder_plugin.volume.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - # backups related section - cloudify.interfaces.snapshot: - create: - implementation: openstack.cinder_plugin.volume.snapshot_create - inputs: {} - apply: - implementation: openstack.cinder_plugin.volume.snapshot_apply - inputs: {} - delete: - implementation: openstack.cinder_plugin.volume.snapshot_delete - inputs: {} - cloudify.interfaces.validation: - creation: - implementation: openstack.cinder_plugin.volume.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.operations: - list: - implementation: openstack.cinder_plugin.volume.list_volumes - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nova_net.nodes.FloatingIP: - derived_from: cloudify.nodes.VirtualIP - properties: - floatingip: - default: {} - description: > - The os-floating-ip object as described in Openstack compute API (create os-floating-ip method): - https://developer.openstack.org/api-ref/compute/#floating-ips-os-floating-ips-deprecated. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - TODO: CREATE. THIS IS MISSING - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
+ <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.KeyPair + description: https://developer.openstack.org/api-ref/compute/?expanded=create-or-import-keypair-detail + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.floatingip.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.keypair.create delete: - implementation: openstack.nova_plugin.floatingip.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.keypair.delete cloudify.interfaces.validation: creation: - implementation: openstack.nova_plugin.floatingip.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - - cloudify.openstack.nova_net.nodes.SecurityGroup: - derived_from: cloudify.nodes.SecurityGroup - properties: - security_group: - default: {} - description: > - The os-security-group object as described in Openstack compute API (create os-security-group method): - https://developer.openstack.org/api-ref/compute/#security-groups-os-security-groups-deprecated - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - TODO: CREATE. THIS IS MISSING - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. - rules: - default: [] - description: > - key-value security group rule as described in: - https://developer.openstack.org/api-ref/compute/#rules-for-security-group-os-security-group-rules-deprecated. 
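A short sketch of the key pair type above, importing an existing public key from a secret; as the data type description notes, omitting public_key lets OpenStack generate one. The secret name is a placeholder.

```yaml
  example_keypair:
    type: cloudify.nodes.openstack.KeyPair
    properties:
      client_config: *client_config   # anchor from the earlier sketch
      resource_config:
        name: example-keypair
        public_key: { get_secret: example_public_key }  # omit to have a key pair generated
```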
- description: - type: string - description: security group description - interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.nova_plugin.security_group.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - status_attempts: - description: > - Number of times to check for the attachment's status before failing - type: integer - default: 10 - status_timeout: - description: > - Interval (in seconds) between subsequent inquiries of the attachment's - status - type: integer - default: 2 - delete: - implementation: openstack.nova_plugin.security_group.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.nova_plugin.security_group.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nodes.Flavor: - derived_from: cloudify.nodes.Root - properties: - flavor: - default: {} - description: > - key-value user configuration, according to: - https://developer.openstack.org/api-ref/compute/#create-flavor - extra_specs: - default: {} - description: > - key-value user configuration, according to: - https://developer.openstack.org/api-ref/compute/#create-extra-specs-for-a-flavor - tenants: - default: [] - description: > - List of tenants to add to flavor access - use_external_resource: - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. 
- interfaces: - cloudify.interfaces.lifecycle: - create: - implementation: openstack.nova_plugin.flavor.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.nova_plugin.flavor.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.keypair.creation_validation cloudify.interfaces.operations: list: - implementation: openstack.nova_plugin.flavor.list_flavors - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.keypair.list_keypairs - cloudify.openstack.nodes.Image: + cloudify.nodes.openstack.HostAggregate: derived_from: cloudify.nodes.Root properties: - image: - description: > - The image object as described in Openstack image API (create image method): - https://developer.openstack.org/api-ref/image/v2/index.html#images - Required parameters are (container_format, disk_format). - To create an image from the local file its path should be added in data parameter. - default: {} - image_url: - type: string - default: '' - description: > - The openstack resource URL for the image. - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - type: string - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration + <<: *external_resource + <<: *client_config + metadata: + required: false default: {} description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + Metadata key and value pairs. 
The maximum size for each metadata key and value pair is 255 bytes + All keys values should be provided as string + Example + metadata: + ssd: 'True' + max_number: '1233' + resource_config: + type: cloudify.types.openstack.HostAggregate + description: https://developer.openstack.org/api-ref/compute/?expanded=create-aggregate-detail + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.glance_plugin.image.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - start: - implementation: openstack.glance_plugin.image.start - inputs: - start_retry_interval: - type: integer - default: 30 - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.create + configure: + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.set_metadata delete: - implementation: openstack.glance_plugin.image.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: - implementation: openstack.glance_plugin.image.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.delete cloudify.interfaces.operations: update: - implementation: openstack.glance_plugin.image.update + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } list: - implementation: openstack.glance_plugin.image.list_images + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.list_aggregates + add_hosts: + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.add_hosts inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - - cloudify.openstack.nodes.Project: - derived_from: cloudify.nodes.Root - properties: - project: - default: {} - description: > - The project object as described in Openstack identity API (create project method): - https://developer.openstack.org/api-ref/identity/v3/#projects - users: - default: [] - description: > - List of users assigned to this project in the following format: - { name: string, roles: [string] } - quota: - default: {} - description: | - A dictionary mapping service names to quota definitions for a projct - - e.g:: - - quota: - neutron: - nova: - use_external_resource: - type: boolean - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. 
- resource_id: + hosts: + default: [] + remove_hosts: + implementation: openstack.openstack_plugin.resources.compute.host_aggregate.remove_hosts + inputs: + hosts: + default: [] + + cloudify.nodes.openstack.Image: + derived_from: cloudify.nodes.Root + properties: + <<: *external_resource + <<: *client_config + image_url: type: string default: '' description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - type: cloudify.openstack.types.configuration - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + The openstack resource URL for the image. + resource_config: + type: cloudify.types.openstack.Image + description: https://developer.openstack.org/api-ref/image/v2/index.html?expanded=show-image-detail,create-image-detail#create-image + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.keystone_plugin.project.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.image.create start: - implementation: openstack.keystone_plugin.project.start - inputs: - quota_dict: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.image.start delete: - implementation: openstack.keystone_plugin.project.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.compute.image.delete + cloudify.interfaces.validation: + creation: + implementation: openstack.openstack_plugin.resources.compute.image.creation_validation cloudify.interfaces.operations: - update_project: - implementation: openstack.keystone_plugin.project.update_project + update: + implementation: openstack.openstack_plugin.resources.compute.image.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - get_quota: - implementation: openstack.keystone_plugin.project.get_project_quota - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - update_quota: - implementation: openstack.keystone_plugin.project.update_project_quota - inputs: - quota: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } list: - implementation: openstack.keystone_plugin.project.list_projects + implementation: openstack.openstack_plugin.resources.compute.image.list_images inputs: - args: + query: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.interfaces.validation: - creation: openstack.keystone_plugin.project.creation_validation - - cloudify.openstack.nodes.User: + cloudify.nodes.openstack.Flavor: derived_from: cloudify.nodes.Root properties: - user: - default: {} - description: > - The user object as described in Openstack identity API (create user method): - 
https://developer.openstack.org/api-ref/identity/v3/#users - use_external_resource: - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - default: {} - description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.Flavor + description: https://developer.openstack.org/api-ref/compute/?expanded=create-flavor-detail + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.keystone_plugin.user.create + implementation: openstack.openstack_plugin.resources.compute.flavor.create + delete: + implementation: openstack.openstack_plugin.resources.compute.flavor.delete + cloudify.interfaces.operations: + update: + implementation: openstack.openstack_plugin.resources.compute.flavor.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - delete: - implementation: openstack.keystone_plugin.user.delete + list: + implementation: openstack.openstack_plugin.resources.compute.flavor.list_flavors inputs: - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + details: + default: True + + cloudify.nodes.openstack.User: + derived_from: cloudify.nodes.Root + properties: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.User + description: https://developer.openstack.org/api-ref/identity/v3/?expanded=update-user.yaml-detail,create-user.yaml-detail,update-project-detail#users + required: true + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.openstack_plugin.resources.identity.user.create + delete: + implementation: openstack.openstack_plugin.resources.identity.user.delete cloudify.interfaces.operations: update: - implementation: openstack.keystone_plugin.user.update + implementation: openstack.openstack_plugin.resources.identity.user.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } list: - implementation: openstack.keystone_plugin.user.list_users + implementation: openstack.openstack_plugin.resources.identity.user.list_users inputs: - args: + query: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - cloudify.openstack.nodes.HostAggregate: + cloudify.nodes.openstack.Project: derived_from: cloudify.nodes.Root properties: - aggregate: - default: {} - description: > - The host-aggregate object as described in Openstack compute API (create host-aggregate method): - https://developer.openstack.org/api-ref/compute/#host-aggregates-os-aggregates - hosts: - required: false - description: > - list of hosts IDs, which will be a 
members of host aggregate - metadata: - required: false - description: > - metadata entries in : format - use_external_resource: - default: false - description: > - a boolean for setting whether to create the resource or use an existing one. - See the using existing resources section. - create_if_missing: - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: - default: '' - description: > - name to give to the new resource or the name or ID of an existing resource when the ``use_external_resource`` property is set to ``true`` (see the using existing resources section). Defaults to '' (empty string). - openstack_config: - default: {} + <<: *external_resource + <<: *client_config + users: + default: [] description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + List of users assigned to this project in the following format: + { name: string, roles: [string] } +# quota: +# default: {} +# description: | +# A dictionary mapping service names to quota definitions for a project +# +# e.g:: +# +# quota: +# compute: +# network: +# volume: + resource_config: + type: cloudify.types.openstack.Project + description: https://developer.openstack.org/api-ref/identity/v3/?expanded=update-user.yaml-detail,create-user.yaml-detail,update-project-detail#projects + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.host_aggregate.create - inputs: - args: - default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.identity.project.create + start: + implementation: openstack.openstack_plugin.resources.identity.project.start delete: - implementation: openstack.nova_plugin.host_aggregate.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.identity.project.delete cloudify.interfaces.operations: - update: - implementation: openstack.nova_plugin.host_aggregate.update + update_project: + implementation: openstack.openstack_plugin.resources.identity.project.update inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - list: - implementation: openstack.nova_plugin.host_aggregate.list_host_aggregates - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - add_hosts: - implementation: openstack.nova_plugin.host_aggregate.add_hosts + get_quota: + implementation: openstack.openstack_plugin.resources.identity.project.get_project_quota + update_quota: + implementation: openstack.openstack_plugin.resources.identity.project.update_project_quota inputs: - openstack_config: + quota: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - hosts: - default: [] - remove_hosts: - implementation: openstack.nova_plugin.host_aggregate.remove_hosts + list: + implementation: openstack.openstack_plugin.resources.identity.project.list_projects inputs: - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } - hosts: - default: [] + cloudify.interfaces.validation: 
+ creation: openstack.openstack_plugin.resources.identity.project.creation_validation - cloudify.openstack.nodes.ServerGroup: + cloudify.nodes.openstack.Volume: derived_from: cloudify.nodes.Root properties: - server_group: - description: > - The server-group object as described in Openstack compute API (create server-group method): - https://developer.openstack.org/api-ref/compute/#server-groups-os-server-groups - default: {} - policy: - type: string - description: > - the policy of the server group, this must be either 'affinity', - 'anti-affinity', 'soft-affinity', or 'soft-anti-affinity'. - use_external_resource: - type: boolean - default: false - description: > - a boolean describing whether this resource should be - created or rather that it already exists on Openstack - and should be used as-is. - create_if_missing: - type: boolean - default: false - description: > - If use_external_resource is ``true`` and the resource is missing, - create it instead of failing. - resource_id: + <<: *external_resource + <<: *client_config + device_name: type: string - default: '' - description: > - the name that will be given to the resource on Openstack (excluding optional prefix). - If not provided, a default name will be given instead. - If use_external_resource is set to "true", this exact - value (without any prefixes applied) will be looked for - as either the name or id of an existing server group to be used. - openstack_config: - type: cloudify.openstack.types.configuration - default: {} + default: auto description: > - endpoints and authentication configuration for Openstack. - Expected to contain the following nested fields: - username, password, tenant_name, auth_url, region. + The device name this volume will be attached to. + The default value is *auto*, + which means OpenStack will auto-assign a device. + Note that even if you explicitly set a value, + it may not be the device name that is actually assigned: + if the requested device is not available, OpenStack will attach the volume to a different device. + This is why we recommend keeping the default *auto*. 
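As a concrete illustration of the *auto* device assignment described above, a volume node could be declared roughly as in the following sketch. It is not taken from the plugin's examples: the resource_config keys (name, size) are assumed from the Cinder create-volume API, and server_1 is a hypothetical server template.

```yaml
  volume_1:
    type: cloudify.nodes.openstack.Volume
    properties:
      client_config: { get_input: openstack_config }   # dict of openstacksdk connection parameters
      device_name: auto                                 # keep the default so OpenStack picks the device
      resource_config:
        name: data-volume                               # placeholder volume name
        size: 10                                        # size in GB, assumed Cinder field
    relationships:
      - type: cloudify.relationships.openstack.volume_attached_to_server
        target: server_1                                # hypothetical server node template
```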
+ resource_config: + type: cloudify.types.openstack.Volume + description: https://developer.openstack.org/api-ref/block-storage/v2/index.html?expanded=create-volume-detail#volumes-volumes + required: true interfaces: cloudify.interfaces.lifecycle: create: - implementation: openstack.nova_plugin.server_group.create + implementation: openstack.openstack_plugin.resources.volume.volume.create inputs: args: default: {} - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + start: + implementation: openstack.openstack_plugin.resources.volume.volume.start + inputs: {} delete: - implementation: openstack.nova_plugin.server_group.delete - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.volume.volume.delete + # backups related section + cloudify.interfaces.snapshot: + create: + implementation: openstack.openstack_plugin.resources.volume.volume.snapshot_create + inputs: {} + apply: + implementation: openstack.openstack_plugin.resources.volume.volume.snapshot_apply + inputs: {} + delete: + implementation: openstack.openstack_plugin.resources.volume.volume.snapshot_delete + inputs: {} cloudify.interfaces.validation: creation: - implementation: openstack.nova_plugin.server_group.creation_validation - inputs: - openstack_config: - default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + implementation: openstack.openstack_plugin.resources.volume.volume.creation_validation cloudify.interfaces.operations: list: - implementation: openstack.nova_plugin.server_group.list_servergroups + implementation: openstack.openstack_plugin.resources.volume.volume.list_volumes inputs: - args: - default: {} - openstack_config: + query: default: {} - resource_id: - type: string - default: { get_property: [ SELF, resource_id ] } + + cloudify.nodes.openstack.VolumeType: + derived_from: cloudify.nodes.Root + properties: + <<: *external_resource + <<: *client_config + resource_config: + type: cloudify.types.openstack.VolumeType + description: https://developer.openstack.org/api-ref/block-storage/v3/index.html?expanded=create-a-volume-type-detail#volume-types-types + required: true + interfaces: + cloudify.interfaces.lifecycle: + create: + implementation: openstack.openstack_plugin.resources.volume.volume_type.create + delete: + implementation: openstack.openstack_plugin.resources.volume.volume_type.delete + relationships: - cloudify.openstack.port_connected_to_security_group: + + cloudify.relationships.openstack.server_connected_to_keypair: derived_from: cloudify.relationships.connected_to - source_interfaces: - cloudify.interfaces.relationship_lifecycle: - establish: - implementation: openstack.neutron_plugin.port.connect_security_group - inputs: - openstack_config: - default: {} - unlink: - implementation: openstack.neutron_plugin.port.disconnect_security_group - inputs: - openstack_config: - default: {} - cloudify.openstack.subnet_connected_to_router: + cloudify.relationships.openstack.server_connected_to_server_group: + derived_from: cloudify.relationships.connected_to + + # This is a little confusing, so I will make sure it is clear. + # The relationship is from a cloudify.nodes.openstack.Server to a cloudify.nodes.openstack.Port. + # The operation occurs on a Port object. We use target_interfaces to get that resource. + # We get the source ID (the server) to set the device_id on the port. 
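To make the comment above concrete, the relationship is declared on the server template and targets the port template, roughly as in this sketch. Node names and resource_config fields are placeholders, and the Server and Port node types are assumed to be defined earlier in this file.

```yaml
  port_1:
    type: cloudify.nodes.openstack.Port
    properties:
      client_config: { get_input: openstack_config }    # dict of openstacksdk connection parameters
      resource_config:
        name: server-port                               # placeholder port name
    relationships:
      - type: cloudify.relationships.openstack.port_connected_to_subnet
        target: subnet_1                                # hypothetical subnet template (not shown)

  server_1:
    type: cloudify.nodes.openstack.Server
    properties:
      client_config: { get_input: openstack_config }
      resource_config:
        name: app-server                                # placeholder server name
    relationships:
      # Declared on the server (source) and pointing at the port (target);
      # per the comment above, the operation runs against the port and sets
      # its device_id from the source server's ID.
      - type: cloudify.relationships.openstack.server_connected_to_port
        target: port_1
```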
+ cloudify.relationships.openstack.server_connected_to_port: derived_from: cloudify.relationships.connected_to - target_interfaces: - cloudify.interfaces.relationship_lifecycle: - postconfigure: - implementation: openstack.neutron_plugin.router.connect_subnet - inputs: - openstack_config: - default: {} - unlink: - implementation: openstack.neutron_plugin.router.disconnect_subnet - inputs: - openstack_config: - default: {} - cloudify.openstack.server_connected_to_floating_ip: + # The Nova API used to associate a floating IP with a server is deprecated: + # it fails with a 404 starting from compute microversion 2.44 and has been replaced by the Neutron networking service API. + # https://developer.openstack.org/api-ref/compute/?expanded=create-aggregate-detail,add-associate-floating-ip-addfloatingip-action-deprecated-detail + cloudify.relationships.openstack.server_connected_to_floating_ip: derived_from: cloudify.relationships.connected_to source_interfaces: cloudify.interfaces.relationship_lifecycle: establish: - implementation: openstack.nova_plugin.server.connect_floatingip + implementation: openstack.openstack_plugin.resources.compute.server.connect_floating_ip inputs: + floating_ip: + default: { get_attribute: [ TARGET, floating_ip_address ] } fixed_ip: description: > - The fixed IP to be associated with the floating IP. - If omitted, Openstack will choose which port to associate. + The fixed IP address to be associated with the + floating IP address. Used when the server is + connected to multiple networks. type: string default: '' - openstack_config: - default: {} unlink: - implementation: openstack.nova_plugin.server.disconnect_floatingip + implementation: openstack.openstack_plugin.resources.compute.server.disconnect_floating_ip inputs: - openstack_config: - default: {} + floating_ip: + default: { get_attribute: [ TARGET, floating_ip_address ] } - cloudify.openstack.port_connected_to_floating_ip: + cloudify.relationships.openstack.server_connected_to_security_group: derived_from: cloudify.relationships.connected_to source_interfaces: cloudify.interfaces.relationship_lifecycle: establish: - implementation: openstack.neutron_plugin.floatingip.connect_port + implementation: openstack.openstack_plugin.resources.compute.server.connect_security_group inputs: - openstack_config: - default: {} + security_group_id: + default: { get_attribute: [ TARGET, id ] } unlink: - implementation: openstack.neutron_plugin.floatingip.disconnect_port + implementation: openstack.openstack_plugin.resources.compute.server.disconnect_security_group inputs: - openstack_config: - default: {} + security_group_id: + default: { get_attribute: [ TARGET, id ] } - cloudify.openstack.server_connected_to_security_group: + cloudify.relationships.openstack.volume_attached_to_server: derived_from: cloudify.relationships.connected_to - source_interfaces: + target_interfaces: cloudify.interfaces.relationship_lifecycle: establish: - implementation: openstack.nova_plugin.server.connect_security_group - inputs: - openstack_config: - default: {} + implementation: openstack.openstack_plugin.resources.compute.server.attach_volume + inputs: {} + unlink: + implementation: openstack.openstack_plugin.resources.compute.server.detach_volume + inputs: {} + + cloudify.relationships.openstack.rbac_policy_applied_to: + derived_from: cloudify.relationships.connected_to + target_interfaces: + cloudify.interfaces.relationship_lifecycle: unlink: - implementation: openstack.nova_plugin.server.disconnect_security_group + 
implementation: openstack.openstack_plugin.resources.network.rbac_policy.unlink_target_object inputs: - openstack_config: - default: {} + resource_id: + default: { get_attribute: [ TARGET, id ] } + disable_dhcp: + default: false + clean_ports: + default: false - cloudify.openstack.server_connected_to_port: + cloudify.relationships.openstack.port_connected_to_router: derived_from: cloudify.relationships.connected_to - source_interfaces: + target_interfaces: cloudify.interfaces.relationship_lifecycle: - establish: - implementation: openstack.neutron_plugin.port.attach + postconfigure: + implementation: openstack.openstack_plugin.resources.network.router.add_interface_to_router inputs: - openstack_config: - default: {} + port_id: + default: { get_attribute: [ SOURCE, id ] } unlink: - implementation: openstack.neutron_plugin.port.detach + implementation: openstack.openstack_plugin.resources.network.router.remove_interface_from_router inputs: - openstack_config: - default: {} + port_id: + default: { get_attribute: [ SOURCE, id ] } + + cloudify.relationships.openstack.port_connected_to_security_group: + derived_from: cloudify.relationships.connected_to - cloudify.openstack.server_connected_to_keypair: + cloudify.relationships.openstack.port_connected_to_subnet: derived_from: cloudify.relationships.connected_to - cloudify.openstack.port_connected_to_subnet: + cloudify.relationships.openstack.route_connected_to_router: derived_from: cloudify.relationships.connected_to - cloudify.openstack.volume_attached_to_server: + cloudify.relationships.openstack.subnet_connected_to_router: derived_from: cloudify.relationships.connected_to target_interfaces: cloudify.interfaces.relationship_lifecycle: - establish: - implementation: openstack.nova_plugin.server.attach_volume + postconfigure: + implementation: openstack.openstack_plugin.resources.network.router.add_interface_to_router inputs: - openstack_config: - default: {} - status_attempts: - description: > - Number of times to check for the attachment's status before failing - type: integer - default: 10 - status_timeout: - description: > - Interval (in seconds) between subsequent inquiries of the attachment's - status - type: integer - default: 2 + subnet_id: + default: { get_attribute: [ SOURCE, id ] } unlink: - implementation: openstack.nova_plugin.server.detach_volume + implementation: openstack.openstack_plugin.resources.network.router.remove_interface_from_router inputs: - openstack_config: - default: {} - status_attempts: - description: > - Number of times to check for the detachment's status before failing - type: integer - default: 10 - status_timeout: - description: > - Interval (in seconds) between subsequent inquiries of the detachment's - status - type: integer - default: 2 - - cloudify.openstack.server_connected_to_server_group: - derived_from: cloudify.relationships.connected_to - - cloudify.openstack.rbac_policy_applied_to: - derived_from: cloudify.relationships.connected_to + subnet_id: + default: { get_attribute: [ SOURCE, id ] } - cloudify.openstack.route_connected_to_router: + cloudify.relationships.openstack.port_connected_to_floating_ip: derived_from: cloudify.relationships.connected_to + target_interfaces: + cloudify.interfaces.relationship_lifecycle: + postconfigure: + implementation: openstack.openstack_plugin.resources.network.floating_ip.update + inputs: + args: + default: + port_id: { get_attribute: [ SOURCE, id ] } + unlink: + implementation: openstack.openstack_plugin.resources.network.floating_ip.update + inputs: + args: + 
default: + port_id: '' diff --git a/readthedocs.yml b/readthedocs.yml deleted file mode 100644 index af59f269..00000000 --- a/readthedocs.yml +++ /dev/null @@ -1 +0,0 @@ -requirements_file: docs/requirements.txt diff --git a/setup.py b/setup.py index 82e55010..f3906ea8 100644 --- a/setup.py +++ b/setup.py @@ -1,45 +1,30 @@ -######### -# Copyright (c) 2018 GigaSpaces Technologies Ltd. All rights reserved +######## +# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. + from setuptools import setup +from setuptools import find_packages setup( - zip_safe=True, name='cloudify-openstack-plugin', - version='2.14.8', + version='3.0.0', author='Cloudify', - author_email='hello@cloudify.co', - packages=[ - 'openstack_plugin_common', - 'nova_plugin', - 'neutron_plugin', - 'cinder_plugin', - 'glance_plugin', - 'keystone_plugin' - ], + author_email='info@cloudify.co', license='LICENSE', - description='Cloudify plugin for OpenStack infrastructure.', - install_requires=[ - 'cloudify-plugins-common>=3.4.2', - 'keystoneauth1>=2.16.0,<3', - 'python-novaclient==7.0.0', - 'python-keystoneclient==3.5.0', - 'python-neutronclient==6.0.0', - 'python-cinderclient==1.9.0', - 'python-glanceclient==2.5.0', - 'IPy==0.81' - ] -) + zip_safe=False, + packages=find_packages(exclude=['tests*']), + install_requires=['cloudify-common', 'openstacksdk'], + test_requires=['mock', 'requests-mock']) diff --git a/system_tests/__init__.py b/system_tests/__init__.py deleted file mode 100644 index 3ad9513f..00000000 --- a/system_tests/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from pkgutil import extend_path -__path__ = extend_path(__path__, __name__) diff --git a/system_tests/openstack_handler.py b/system_tests/openstack_handler.py deleted file mode 100644 index 76368fa1..00000000 --- a/system_tests/openstack_handler.py +++ /dev/null @@ -1,657 +0,0 @@ -######## -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -import random -import logging -import os -import time -import copy -from contextlib import contextmanager - -from cinderclient import client as cinderclient -from keystoneauth1 import loading, session -import novaclient.client as nvclient -import neutronclient.v2_0.client as neclient -from retrying import retry - -from cosmo_tester.framework.handlers import ( - BaseHandler, - BaseCloudifyInputsConfigReader) -from cosmo_tester.framework.util import get_actual_keypath - -logging.getLogger('neutronclient.client').setLevel(logging.INFO) -logging.getLogger('novaclient.client').setLevel(logging.INFO) - - -VOLUME_TERMINATION_TIMEOUT_SECS = 300 - - -class OpenstackCleanupContext(BaseHandler.CleanupContext): - - def __init__(self, context_name, env): - super(OpenstackCleanupContext, self).__init__(context_name, env) - self.before_run = self.env.handler.openstack_infra_state() - - def cleanup(self): - """ - Cleans resources created by the test. - Resource that existed before the test will not be removed - """ - super(OpenstackCleanupContext, self).cleanup() - resources_to_teardown = self.get_resources_to_teardown( - self.env, resources_to_keep=self.before_run) - if self.skip_cleanup: - self.logger.warn('[{0}] SKIPPING cleanup of resources: {1}' - .format(self.context_name, resources_to_teardown)) - else: - self._clean(self.env, resources_to_teardown) - - @classmethod - def clean_all(cls, env): - """ - Cleans *all* resources, including resources that were not - created by the test - """ - super(OpenstackCleanupContext, cls).clean_all(env) - resources_to_teardown = cls.get_resources_to_teardown(env) - cls._clean(env, resources_to_teardown) - - @classmethod - def _clean(cls, env, resources_to_teardown): - cls.logger.info('Openstack handler will try to remove these resources:' - ' {0}'.format(resources_to_teardown)) - failed_to_remove = env.handler.remove_openstack_resources( - resources_to_teardown) - if failed_to_remove: - trimmed_failed_to_remove = {key: value for key, value in - failed_to_remove.iteritems() - if value} - if len(trimmed_failed_to_remove) > 0: - msg = 'Openstack handler failed to remove some resources:' \ - ' {0}'.format(trimmed_failed_to_remove) - cls.logger.error(msg) - raise RuntimeError(msg) - - @classmethod - def get_resources_to_teardown(cls, env, resources_to_keep=None): - all_existing_resources = env.handler.openstack_infra_state() - if resources_to_keep: - return env.handler.openstack_infra_state_delta( - before=resources_to_keep, after=all_existing_resources) - else: - return all_existing_resources - - def update_server_id(self, server_name): - - # retrieve the id of the new server - nova, _, _ = self.env.handler.openstack_clients() - servers = nova.servers.list( - search_opts={'name': server_name}) - if len(servers) > 1: - raise RuntimeError( - 'Expected 1 server with name {0}, but found {1}' - .format(server_name, len(servers))) - - new_server_id = servers[0].id - - # retrieve the id of the old server - old_server_id = None - servers = self.before_run['servers'] - for server_id, name in servers.iteritems(): - if server_name == name: - old_server_id = server_id - break - if old_server_id is None: - raise RuntimeError( - 'Could not find a server with name {0} ' - 'in the internal cleanup context state' - .format(server_name)) - - # replace the id in the internal state - servers[new_server_id] = servers.pop(old_server_id) - - -class CloudifyOpenstackInputsConfigReader(BaseCloudifyInputsConfigReader): - - def __init__(self, cloudify_config, manager_blueprint_path, 
**kwargs): - super(CloudifyOpenstackInputsConfigReader, self).__init__( - cloudify_config, manager_blueprint_path=manager_blueprint_path, - **kwargs) - - @property - def region(self): - return self.config['region'] - - @property - def management_server_name(self): - return self.config['manager_server_name'] - - @property - def agent_key_path(self): - return self.config['agent_private_key_path'] - - @property - def management_user_name(self): - return self.config['ssh_user'] - - @property - def management_key_path(self): - return self.config['ssh_key_filename'] - - @property - def agent_keypair_name(self): - return self.config['agent_public_key_name'] - - @property - def management_keypair_name(self): - return self.config['manager_public_key_name'] - - @property - def use_existing_agent_keypair(self): - return self.config['use_existing_agent_keypair'] - - @property - def use_existing_manager_keypair(self): - return self.config['use_existing_manager_keypair'] - - @property - def external_network_name(self): - return self.config['external_network_name'] - - @property - def keystone_username(self): - return self.config['keystone_username'] - - @property - def keystone_password(self): - return self.config['keystone_password'] - - @property - def keystone_tenant_name(self): - return self.config['keystone_tenant_name'] - - @property - def keystone_url(self): - return self.config['keystone_url'] - - @property - def neutron_url(self): - return self.config.get('neutron_url', None) - - @property - def management_network_name(self): - return self.config['management_network_name'] - - @property - def management_subnet_name(self): - return self.config['management_subnet_name'] - - @property - def management_router_name(self): - return self.config['management_router'] - - @property - def agents_security_group(self): - return self.config['agents_security_group_name'] - - @property - def management_security_group(self): - return self.config['manager_security_group_name'] - - -class OpenstackHandler(BaseHandler): - - CleanupContext = OpenstackCleanupContext - CloudifyConfigReader = CloudifyOpenstackInputsConfigReader - - def before_bootstrap(self): - super(OpenstackHandler, self).before_bootstrap() - with self.update_cloudify_config() as patch: - suffix = '-%06x' % random.randrange(16 ** 6) - server_name_prop_path = 'manager_server_name' - patch.append_value(server_name_prop_path, suffix) - - def after_bootstrap(self, provider_context): - super(OpenstackHandler, self).after_bootstrap(provider_context) - resources = provider_context['resources'] - agent_keypair = resources['agents_keypair'] - management_keypair = resources['management_keypair'] - self.remove_agent_keypair = agent_keypair['external_resource'] is False - self.remove_management_keypair = \ - management_keypair['external_resource'] is False - - def after_teardown(self): - super(OpenstackHandler, self).after_teardown() - if self.remove_agent_keypair: - agent_key_path = get_actual_keypath(self.env, - self.env.agent_key_path, - raise_on_missing=False) - if agent_key_path: - os.remove(agent_key_path) - if self.remove_management_keypair: - management_key_path = get_actual_keypath( - self.env, - self.env.management_key_path, - raise_on_missing=False) - if management_key_path: - os.remove(management_key_path) - - def openstack_clients(self): - creds = self._client_creds() - params = { - 'region_name': creds.pop('region_name'), - } - - loader = loading.get_plugin_loader("password") - auth = loader.load_from_options(**creds) - sess = 
session.Session(auth=auth, verify=True) - - params['session'] = sess - - nova = nvclient.Client('2', **params) - neutron = neclient.Client(**params) - cinder = cinderclient.Client('2', **params) - - return (nova, neutron, cinder) - - @retry(stop_max_attempt_number=5, wait_fixed=20000) - def openstack_infra_state(self): - """ - @retry decorator is used because this error sometimes occur: - ConnectionFailed: Connection to neutron failed: Maximum - attempts reached - """ - nova, neutron, cinder = self.openstack_clients() - try: - prefix = self.env.resources_prefix - except (AttributeError, KeyError): - prefix = '' - return { - 'networks': dict(self._networks(neutron, prefix)), - 'subnets': dict(self._subnets(neutron, prefix)), - 'routers': dict(self._routers(neutron, prefix)), - 'security_groups': dict(self._security_groups(neutron, prefix)), - 'servers': dict(self._servers(nova, prefix)), - 'key_pairs': dict(self._key_pairs(nova, prefix)), - 'floatingips': dict(self._floatingips(neutron, prefix)), - 'ports': dict(self._ports(neutron, prefix)), - 'volumes': dict(self._volumes(cinder, prefix)) - } - - def openstack_infra_state_delta(self, before, after): - after = copy.deepcopy(after) - return { - prop: self._remove_keys(after[prop], before[prop].keys()) - for prop in before - } - - def _find_keypairs_to_delete(self, nodes, node_instances): - """Filter the nodes only returning the names of keypair nodes - - Examine node_instances and nodes, return the external_name of - those node_instances, which correspond to a node that has a - type == KeyPair - - To filter by deployment_id, simply make sure that the nodes and - node_instances this method receives, are pre-filtered - (ie. filter the nodes while fetching them from the manager) - """ - keypairs = set() # a set of (deployment_id, node_id) tuples - - for node in nodes: - if node.get('type') != 'cloudify.openstack.nodes.KeyPair': - continue - # deployment_id isnt always present in local_env runs - key = (node.get('deployment_id'), node['id']) - keypairs.add(key) - - for node_instance in node_instances: - key = (node_instance.get('deployment_id'), - node_instance['node_id']) - if key not in keypairs: - continue - - runtime_properties = node_instance['runtime_properties'] - if not runtime_properties: - continue - name = runtime_properties.get('external_name') - if name: - yield name - - def _delete_keypairs_by_name(self, keypair_names): - nova, neutron, cinder = self.openstack_clients() - existing_keypairs = nova.keypairs.list() - - for name in keypair_names: - for keypair in existing_keypairs: - if keypair.name == name: - nova.keypairs.delete(keypair) - - def remove_keypairs_from_local_env(self, local_env): - """Query the local_env for nodes which are keypairs, remove them - - Similar to querying the manager, we can look up nodes in the local_env - which is used for tests. - """ - nodes = local_env.storage.get_nodes() - node_instances = local_env.storage.get_node_instances() - names = self._find_keypairs_to_delete(nodes, node_instances) - self._delete_keypairs_by_name(names) - - def remove_keypairs_from_manager(self, deployment_id=None, - rest_client=None): - """Query the manager for nodes by deployment_id, delete keypairs - - Fetch nodes and node_instances from the manager by deployment_id - (or all if not given), find which ones represent openstack keypairs, - remove them. 
- """ - if rest_client is None: - rest_client = self.env.rest_client - - nodes = rest_client.nodes.list(deployment_id=deployment_id) - node_instances = rest_client.node_instances.list( - deployment_id=deployment_id) - keypairs = self._find_keypairs_to_delete(nodes, node_instances) - self._delete_keypairs_by_name(keypairs) - - def remove_keypair(self, name): - """Delete an openstack keypair by name. If it doesnt exist, do nothing. - """ - self._delete_keypairs_by_name([name]) - - def remove_openstack_resources(self, resources_to_remove): - # basically sort of a workaround, but if we get the order wrong - # the first time, there is a chance things would better next time - # 3'rd time can't really hurt, can it? - # 3 is a charm - for _ in range(3): - resources_to_remove = self._remove_openstack_resources_impl( - resources_to_remove) - if all([len(g) == 0 for g in resources_to_remove.values()]): - break - # give openstack some time to update its data structures - time.sleep(3) - return resources_to_remove - - def _remove_openstack_resources_impl(self, resources_to_remove): - nova, neutron, cinder = self.openstack_clients() - - servers = nova.servers.list() - ports = neutron.list_ports()['ports'] - routers = neutron.list_routers()['routers'] - subnets = neutron.list_subnets()['subnets'] - networks = neutron.list_networks()['networks'] - # keypairs = nova.keypairs.list() - floatingips = neutron.list_floatingips()['floatingips'] - security_groups = neutron.list_security_groups()['security_groups'] - volumes = cinder.volumes.list() - - failed = { - 'servers': {}, - 'routers': {}, - 'ports': {}, - 'subnets': {}, - 'networks': {}, - 'key_pairs': {}, - 'floatingips': {}, - 'security_groups': {}, - 'volumes': {} - } - - volumes_to_remove = [] - for volume in volumes: - if volume.id in resources_to_remove['volumes']: - volumes_to_remove.append(volume) - - left_volumes = self._delete_volumes(nova, cinder, volumes_to_remove) - for volume_id, ex in left_volumes.iteritems(): - failed['volumes'][volume_id] = ex - - for server in servers: - if server.id in resources_to_remove['servers']: - with self._handled_exception(server.id, failed, 'servers'): - nova.servers.delete(server) - - for router in routers: - if router['id'] in resources_to_remove['routers']: - with self._handled_exception(router['id'], failed, 'routers'): - for p in neutron.list_ports( - device_id=router['id'])['ports']: - neutron.remove_interface_router(router['id'], { - 'port_id': p['id'] - }) - neutron.delete_router(router['id']) - - for port in ports: - if port['id'] in resources_to_remove['ports']: - with self._handled_exception(port['id'], failed, 'ports'): - neutron.delete_port(port['id']) - - for subnet in subnets: - if subnet['id'] in resources_to_remove['subnets']: - with self._handled_exception(subnet['id'], failed, 'subnets'): - neutron.delete_subnet(subnet['id']) - - for network in networks: - if network['name'] == self.env.external_network_name: - continue - if network['id'] in resources_to_remove['networks']: - with self._handled_exception(network['id'], failed, - 'networks'): - neutron.delete_network(network['id']) - - # TODO: implement key-pair creation and cleanup per tenant - # - # IMPORTANT: Do not remove key-pairs, they might be used - # by another tenant (of the same user) - # - # for key_pair in keypairs: - # if key_pair.name == self.env.agent_keypair_name and \ - # self.env.use_existing_agent_keypair: - # # this is a pre-existing agent key-pair, do not remove - # continue - # elif key_pair.name == 
self.env.management_keypair_name and \ - # self.env.use_existing_manager_keypair: - # # this is a pre-existing manager key-pair, do not remove - # continue - # elif key_pair.id in resources_to_remove['key_pairs']: - # with self._handled_exception(key_pair.id, failed, - # 'key_pairs'): - # nova.keypairs.delete(key_pair) - - for floatingip in floatingips: - if floatingip['id'] in resources_to_remove['floatingips']: - with self._handled_exception(floatingip['id'], failed, - 'floatingips'): - neutron.delete_floatingip(floatingip['id']) - - for security_group in security_groups: - if security_group['name'] == 'default': - continue - if security_group['id'] in resources_to_remove['security_groups']: - with self._handled_exception(security_group['id'], - failed, 'security_groups'): - neutron.delete_security_group(security_group['id']) - - return failed - - def _delete_volumes(self, nova, cinder, existing_volumes): - unremovables = {} - end_time = time.time() + VOLUME_TERMINATION_TIMEOUT_SECS - - for volume in existing_volumes: - # detach the volume - if volume.status in ['available', 'error', 'in-use']: - try: - self.logger.info('Detaching volume {0} ({1}), currently in' - ' status {2} ...'. - format(volume.name, volume.id, - volume.status)) - for attachment in volume.attachments: - nova.volumes.delete_server_volume( - server_id=attachment['server_id'], - attachment_id=attachment['id']) - except Exception as e: - self.logger.warning('Attempt to detach volume {0} ({1})' - ' yielded exception: "{2}"'. - format(volume.name, volume.id, - e)) - unremovables[volume.id] = e - existing_volumes.remove(volume) - - time.sleep(3) - for volume in existing_volumes: - # delete the volume - if volume.status in ['available', 'error', 'in-use']: - try: - self.logger.info('Deleting volume {0} ({1}), currently in' - ' status {2} ...'. - format(volume.name, volume.id, - volume.status)) - cinder.volumes.delete(volume) - except Exception as e: - self.logger.warning('Attempt to delete volume {0} ({1})' - ' yielded exception: "{2}"'. - format(volume.name, volume.id, - e)) - unremovables[volume.id] = e - existing_volumes.remove(volume) - - # wait for all volumes deletion until completed or timeout is reached - while existing_volumes and time.time() < end_time: - time.sleep(3) - for volume in existing_volumes: - volume_id = volume.id - volume_name = volume.name - try: - vol = cinder.volumes.get(volume_id) - if vol.status == 'deleting': - self.logger.debug('volume {0} ({1}) is being ' - 'deleted...'.format(volume_name, - volume_id)) - else: - self.logger.warning('volume {0} ({1}) is in ' - 'unexpected status: {2}'. - format(volume_name, volume_id, - vol.status)) - except Exception as e: - # the volume wasn't found, it was deleted - if hasattr(e, 'code') and e.code == 404: - self.logger.info('deleted volume {0} ({1})'. - format(volume_name, volume_id)) - existing_volumes.remove(volume) - else: - self.logger.warning('failed to remove volume {0} ' - '({1}), exception: {2}'. - format(volume_name, - volume_id, e)) - unremovables[volume_id] = e - existing_volumes.remove(volume) - - if existing_volumes: - for volume in existing_volumes: - # try to get the volume's status - try: - vol = cinder.volumes.get(volume.id) - vol_status = vol.status - except: - # failed to get volume... 
status is unknown - vol_status = 'unknown' - - unremovables[volume.id] = 'timed out while removing volume '\ - '{0} ({1}), current volume status '\ - 'is {2}'.format(volume.name, - volume.id, - vol_status) - - if unremovables: - self.logger.warning('failed to remove volumes: {0}'.format( - unremovables)) - - return unremovables - - def _client_creds(self): - return { - 'username': self.env.keystone_username, - 'password': self.env.keystone_password, - 'auth_url': self.env.keystone_url, - 'project_name': self.env.keystone_tenant_name, - 'region_name': self.env.region - } - - def _networks(self, neutron, prefix): - return [(n['id'], n['name']) - for n in neutron.list_networks()['networks'] - if self._check_prefix(n['name'], prefix)] - - def _subnets(self, neutron, prefix): - return [(n['id'], n['name']) - for n in neutron.list_subnets()['subnets'] - if self._check_prefix(n['name'], prefix)] - - def _routers(self, neutron, prefix): - return [(n['id'], n['name']) - for n in neutron.list_routers()['routers'] - if self._check_prefix(n['name'], prefix)] - - def _security_groups(self, neutron, prefix): - return [(n['id'], n['name']) - for n in neutron.list_security_groups()['security_groups'] - if self._check_prefix(n['name'], prefix)] - - def _servers(self, nova, prefix): - return [(s.id, s.human_id) - for s in nova.servers.list() - if self._check_prefix(s.human_id, prefix)] - - def _key_pairs(self, nova, prefix): - return [(kp.id, kp.name) - for kp in nova.keypairs.list() - if self._check_prefix(kp.name, prefix)] - - def _floatingips(self, neutron, prefix): - return [(ip['id'], ip['floating_ip_address']) - for ip in neutron.list_floatingips()['floatingips']] - - def _ports(self, neutron, prefix): - return [(p['id'], p['name']) - for p in neutron.list_ports()['ports'] - if self._check_prefix(p['name'], prefix)] - - def _volumes(self, cinder, prefix): - return [(v.id, v.name) for v in cinder.volumes.list() - if self._check_prefix(v.name, prefix)] - - def _check_prefix(self, name, prefix): - # some openstack resources (eg. volumes) can have no display_name, - # in which case it's None - return name is None or name.startswith(prefix) - - def _remove_keys(self, dct, keys): - for key in keys: - if key in dct: - del dct[key] - return dct - - @contextmanager - def _handled_exception(self, resource_id, failed, resource_group): - try: - yield - except BaseException, ex: - failed[resource_group][resource_id] = ex - - -handler = OpenstackHandler diff --git a/system_tests/openstack_nova_net_handler.py b/system_tests/openstack_nova_net_handler.py deleted file mode 100644 index 06fa0ab4..00000000 --- a/system_tests/openstack_nova_net_handler.py +++ /dev/null @@ -1,98 +0,0 @@ -######## -# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. 
- -import novaclient.v2.client as nvclient - -from system_tests.openstack_handler import OpenstackHandler - - -class OpenstackNovaNetHandler(OpenstackHandler): - - # using the Config Readers of the regular OpenstackHandler - attempts - # of reading neutron-related data may fail but shouldn't happen from - # nova-net tests in the first place - # CloudifyConfigReader = None - - def openstack_clients(self): - creds = self._client_creds() - return nvclient.Client(**creds) - - def openstack_infra_state(self): - nova = self.openstack_clients() - prefix = self.env.resources_prefix - return { - 'security_groups': dict(self._security_groups(nova, prefix)), - 'servers': dict(self._servers(nova, prefix)), - 'key_pairs': dict(self._key_pairs(nova, prefix)), - 'floatingips': dict(self._floatingips(nova, prefix)), - } - - def _floatingips(self, nova, prefix): - return [(ip.id, ip.ip) - for ip in nova.floating_ips.list()] - - def _security_groups(self, nova, prefix): - return [(n.id, n.name) - for n in nova.security_groups.list() - if self._check_prefix(n.name, prefix)] - - def _remove_openstack_resources_impl(self, resources_to_remove): - nova = self.openstack_clients() - - servers = nova.servers.list() - keypairs = nova.keypairs.list() - floatingips = nova.floating_ips.list() - security_groups = nova.security_groups.list() - - failed = { - 'servers': {}, - 'key_pairs': {}, - 'floatingips': {}, - 'security_groups': {} - } - - for server in servers: - if server.id in resources_to_remove['servers']: - with self._handled_exception(server.id, failed, 'servers'): - nova.servers.delete(server) - for key_pair in keypairs: - if key_pair.name == self.env.agent_keypair_name and \ - self.env.use_existing_agent_keypair: - # this is a pre-existing agent key-pair, do not remove - continue - elif key_pair.name == self.env.management_keypair_name and \ - self.env.use_existing_manager_keypair: - # this is a pre-existing manager key-pair, do not remove - continue - elif key_pair.id in resources_to_remove['key_pairs']: - with self._handled_exception(key_pair.id, failed, 'key_pairs'): - nova.keypairs.delete(key_pair) - for floatingip in floatingips: - if floatingip.id in resources_to_remove['floatingips']: - with self._handled_exception(floatingip.id, failed, - 'floatingips'): - nova.floating_ips.delete(floatingip) - for security_group in security_groups: - if security_group.name == 'default': - continue - if security_group.id in resources_to_remove['security_groups']: - with self._handled_exception(security_group.id, failed, - 'security_groups'): - nova.security_groups.delete(security_group) - - return failed - - -handler = OpenstackNovaNetHandler diff --git a/test-requirements.txt b/test-requirements.txt new file mode 100644 index 00000000..feb0850b --- /dev/null +++ b/test-requirements.txt @@ -0,0 +1,9 @@ +nose-testconfig>=0.9 +nose>=1.3 +nose-cov>=1.3 +mock>=1.0 +flake8 +tox +cloudify>=4.0 +python-openstackclient +sh diff --git a/tox.ini b/tox.ini index d1589907..3603901d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,37 +1,22 @@ # content of: tox.ini , put in same dir as setup.py [tox] -envlist=flake8,docs,py27 +envlist=flake8,py27,validation [testenv] deps = + -rtest-requirements.txt -rdev-requirements.txt +[testenv:flake8] +commands=flake8 openstack_plugin openstack_sdk + [testenv:py27] -deps = - coverage==3.7.1 - nose - nose-cov - mock - testfixtures - {[testenv]deps} commands = - nosetests -v --cover-html --with-coverage --cov-report term-missing \ - --cover-package=cinder_plugin cinder_plugin/tests \ - 
--cover-package=glance_plugin glance_plugin/tests \ - --cover-package=keystone_plugin keystone_plugin/tests \ - --cover-package=neutron_plugin neutron_plugin/tests/test_port.py neutron_plugin/tests/test_security_group.py neutron_plugin/tests/test_rbac_policy.py \ - --cover-package=nova_plugin nova_plugin/tests \ - --cover-package=openstack_plugin_common openstack_plugin_common/tests \ - --with-xunit --xunit-file=nosetests.xml + nosetests --nocapture --cover-html --with-coverage \ + --cov-report term-missing \ + --cover-package=openstack_plugin openstack_plugin/tests \ + --cover-package=openstack_sdk openstack_sdk/tests \ + --with-xunit --xunit-file=nosetests.xml -[testenv:flake8] -deps = - flake8 - {[testenv]deps} -commands = - flake8 cinder_plugin - flake8 neutron_plugin - flake8 nova_plugin - flake8 openstack_plugin_common - flake8 glance_plugin - flake8 keystone_plugin +[testenv:validation] +commands=cfy blueprints validate examples/local/blueprint.yaml
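The new validation environment runs cfy blueprints validate against examples/local/blueprint.yaml, which is not included in this diff. Purely as an illustration of the renamed types and relationships, such a blueprint might have the following shape; the DSL version, import lines, inputs and resource_config values are assumptions, not the contents of the actual example file.

```yaml
tosca_definitions_version: cloudify_dsl_1_3

imports:
  # Assumed imports; the real example file may pin different versions.
  - http://www.getcloudify.org/spec/cloudify/4.5/types.yaml
  - plugin:cloudify-openstack-plugin

inputs:
  openstack_config:
    description: Dict of openstacksdk connection parameters (auth_url, username, ...).
    default: {}

node_templates:
  floating_ip_1:
    type: cloudify.nodes.openstack.FloatingIP
    properties:
      client_config: { get_input: openstack_config }
      resource_config:
        floating_network_name: external   # placeholder external network name

  server_1:
    type: cloudify.nodes.openstack.Server
    properties:
      client_config: { get_input: openstack_config }
      resource_config:
        name: example-server              # placeholder server name
    relationships:
      # Relationship defined in the plugin.yaml hunk above.
      - type: cloudify.relationships.openstack.server_connected_to_floating_ip
        target: floating_ip_1
```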