diff --git a/.github/workflows/continuous-integration-workflow.yml b/.github/workflows/continuous-integration-workflow.yml new file mode 100644 index 0000000..e8accd9 --- /dev/null +++ b/.github/workflows/continuous-integration-workflow.yml @@ -0,0 +1,20 @@ +name: CI Workflow +on: + push: + branches: + - '**' # Run on commits to any branch +jobs: + build: + name: CI + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Deps + run: make deps + - name: Type test + run: make test + - name: Type check + run: make type-check + - name: Lint + run: make lint diff --git a/Makefile b/Makefile index 9e4f509..9c522d2 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ lint: deps .PHONY: lint type-check: deps - ${activate} && ${python} -m mypy --no-color-output --pretty src + ${activate} && ${python} -m mypy --no-color-output src .PHONY: type-check test: deps diff --git a/README.md b/README.md index ab718c1..991889c 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,32 @@ basis for later automation. # Usage ``` +$ ./openstack_workload_generator --help +usage: Create workloads on openstack installations [-h] [--log_level loglevel] [--os_cloud OS_CLOUD] [--ansible_inventory [ANSIBLE_INVENTORY]] [--config CONFIG] + (--create_domains DOMAINNAME [DOMAINNAME ...] | --delete_domains DOMAINNAME [DOMAINNAME ...]) + (--create_projects PROJECTNAME [PROJECTNAME ...] | --delete_projects PROJECTNAME [PROJECTNAME ...]) + (--create_machines SERVERNAME [SERVERNAME ...] 
| --delete_machines SERVERNAME [SERVERNAME ...]) + +options: + -h, --help show this help message and exit + --log_level loglevel The loglevel + --os_cloud OS_CLOUD The openstack config to use, defaults to the value of the OS_CLOUD environment variable or "admin" if the variable is not set + --ansible_inventory [ANSIBLE_INVENTORY] + Dump the created servers as an ansible inventory to the specified directory, adds a ssh proxy jump for the hosts without a floating ip + --config CONFIG The config file for environment creation, define a path to the yaml file or a subpath in the profiles folder + --create_domains DOMAINNAME [DOMAINNAME ...] + A list of domains to be created + --delete_domains DOMAINNAME [DOMAINNAME ...] + A list of domains to be deleted, all child elements are recursively deleted + --create_projects PROJECTNAME [PROJECTNAME ...] + A list of projects to be created in the created domains + --delete_projects PROJECTNAME [PROJECTNAME ...] + A list of projects to be deleted in the created domains, all child elements are recursively deleted + --create_machines SERVERNAME [SERVERNAME ...] + A list of vms to be created in the created domains + --delete_machines SERVERNAME [SERVERNAME ...] 
+ A list of vms to be deleted in the created projects + ``` # Testing Scenarios diff --git a/openstack_workload_generator b/openstack_workload_generator index 0871b0b..06c5e8c 100755 --- a/openstack_workload_generator +++ b/openstack_workload_generator @@ -1,6 +1,6 @@ #!/bin/bash -rundir="$(dirname $(readlink -f $0))/" +rundir="$(dirname $(readlink -f $0))" cd "$rundir" || exit 1 modification_time(){ diff --git a/requirements.txt b/requirements.txt index 727fc81..a0b2c4f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,5 +8,5 @@ PyYAML==6.0.1 types-pyyaml openstacksdk==3.3.0 pytest==7.4.0 -mypy==1.4.1 +mypy==1.13.0 flake8==6.1.0 diff --git a/src/__init__.py b/src/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/openstack_workload_generator/__main__.py b/src/openstack_workload_generator/__main__.py index 9ea46f1..5ff2e60 100644 --- a/src/openstack_workload_generator/__main__.py +++ b/src/openstack_workload_generator/__main__.py @@ -1,17 +1,31 @@ #!/usr/bin/env python3 +import sys +import os import argparse import logging -import os -import sys import time -from entities.helpers import setup_logging, cloud_checker, item_checker from openstack.connection import Connection from openstack.config import loader -from entities.helpers import Config -from entities import WorkloadGeneratorDomain +# $ make type-check +# source venv/bin/activate && python3 -m mypy --no-color-output --pretty src +# src/openstack_workload_generator/__main__.py:12: error: Cannot find implementation or library +# stub for module named "entities" [import-not-found] +# from entities import WorkloadGeneratorDomain +# ^ +# src/openstack_workload_generator/__main__.py:13: error: Cannot find implementation or library stub for module +# named "entities.helpers" [import-not-found] +# from entities.helpers import setup_logging, cloud_checker, item_checker, Config +# ^ +# src/openstack_workload_generator/__main__.py:13: note: See +# 
https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports +# Found 2 errors in 1 file (checked 9 source files) +# make: *** [Makefile:25: type-check] Error 1 + +from entities import WorkloadGeneratorDomain # type: ignore[import-not-found] +from entities.helpers import setup_logging, cloud_checker, item_checker, Config # type: ignore[import-not-found] LOGGER = logging.getLogger() @@ -48,21 +62,22 @@ exclusive_group_project = parser.add_mutually_exclusive_group(required=True) exclusive_group_project.add_argument('--create_projects', type=item_checker, nargs="+", default=None, - metavar="PROJECTNAME", - help='A list of projects to be created in the created domains') + metavar="PROJECTNAME", + help='A list of projects to be created in the created domains') exclusive_group_project.add_argument('--delete_projects', type=item_checker, nargs="+", default=None, - metavar="PROJECTNAME", - help='A list of projects to be deleted in the created domains, all child elements are recursively deleted') + metavar="PROJECTNAME", + help='A list of projects to be deleted in the created ' + 'domains, all child elements are recursively deleted') exclusive_group_machines = parser.add_mutually_exclusive_group(required=True) exclusive_group_machines.add_argument('--create_machines', type=item_checker, nargs="+", default=None, - metavar="SERVERNAME", - help='A list of vms to be created in the created domains') + metavar="SERVERNAME", + help='A list of vms to be created in the created domains') exclusive_group_machines.add_argument('--delete_machines', type=item_checker, nargs="+", default=None, - metavar="SERVERNAME", - help='A list of vms to be deleted in the created projects') + metavar="SERVERNAME", + help='A list of vms to be deleted in the created projects') args = parser.parse_args() @@ -77,6 +92,7 @@ def establish_connection(): cloud_config = config.get_one(args.os_cloud) return Connection(config=cloud_config) + time_start = time.time() Config.load_config(args.config) @@ 
-102,7 +118,7 @@ def establish_connection(): workload_project.dump_inventory_hosts(args.ansible_inventory) elif args.delete_machines: for machine_obj in workload_project.get_machines(args.delete_machines): - machine_obj.delete_machine() + machine_obj.delete_machine() sys.exit(0) elif args.delete_projects: conn = establish_connection() diff --git a/src/openstack_workload_generator/entities/__init__.py b/src/openstack_workload_generator/entities/__init__.py index 558b445..f34a785 100644 --- a/src/openstack_workload_generator/entities/__init__.py +++ b/src/openstack_workload_generator/entities/__init__.py @@ -1,7 +1,5 @@ +from .helpers import setup_logging, cloud_checker, item_checker, Config from .domain import WorkloadGeneratorDomain from .project import WorkloadGeneratorProject from .network import WorkloadGeneratorNetwork -from .user import WorkloadGeneratorTestUser - - - +from .user import WorkloadGeneratorUser diff --git a/src/openstack_workload_generator/entities/domain.py b/src/openstack_workload_generator/entities/domain.py index 68bc66f..addb4a2 100644 --- a/src/openstack_workload_generator/entities/domain.py +++ b/src/openstack_workload_generator/entities/domain.py @@ -6,10 +6,11 @@ from openstack.identity.v3.domain import Domain from .project import WorkloadGeneratorProject -from .user import WorkloadGeneratorTestUser +from .user import WorkloadGeneratorUser LOGGER = logging.getLogger() + class WorkloadGeneratorDomain: def __init__(self, conn: Connection, domain_name: str): @@ -17,7 +18,7 @@ def __init__(self, conn: Connection, domain_name: str): self.domain_name = domain_name self.obj: Domain = self.conn.identity.find_domain(domain_name) if self.obj: - DomainCache.add(self.obj.id,self.obj.name) + DomainCache.add(self.obj.id, self.obj.name) self.workload_user = WorkloadGeneratorDomain._get_user(conn, domain_name, self.obj) self.workload_projects: dict[str, WorkloadGeneratorProject] = WorkloadGeneratorDomain._get_projects( conn, self.obj, 
self.workload_user) @@ -26,10 +27,10 @@ def __init__(self, conn: Connection, domain_name: str): def _get_user(conn: Connection, domain_name: str, obj: Domain): if not obj: return None - return WorkloadGeneratorTestUser(conn, f"{domain_name}-admin", obj) + return WorkloadGeneratorUser(conn, f"{domain_name}-admin", obj) @staticmethod - def _get_projects(conn: Connection, domain: Domain | None, user: WorkloadGeneratorTestUser | None) \ + def _get_projects(conn: Connection, domain: Domain | None, user: WorkloadGeneratorUser | None) \ -> dict[str, WorkloadGeneratorProject]: if not domain or not user: return dict() @@ -58,12 +59,15 @@ def disable_domain(self): return domain def get_projects(self, projects: list[str]) -> list[WorkloadGeneratorProject]: + + result: list[WorkloadGeneratorProject] = [] if self.obj is None: - return [] + return result for project in projects: if project in self.workload_projects: - yield self.workload_projects[project] + result.append(self.workload_projects[project]) + return result def delete_domain(self): if self.obj is None: diff --git a/src/openstack_workload_generator/entities/helpers.py b/src/openstack_workload_generator/entities/helpers.py index 15cf3d6..c84eb62 100644 --- a/src/openstack_workload_generator/entities/helpers.py +++ b/src/openstack_workload_generator/entities/helpers.py @@ -15,80 +15,71 @@ class Config: - _config: dict[str, str | dict[str, str]] = \ + _config: dict[str, str | dict[str, str] | None] = \ { - 'admin_domain_password': None, - 'admin_vm_password': None, - 'admin_vm_ssh_key': None, + 'admin_domain_password': "", + 'admin_vm_password': "", + 'admin_vm_ssh_key': "", 'admin_vm_ssh_keypair_name': 'my_ssh_public_key', 'project_ipv4_subnet': '192.168.200.0/24', 'public_network': "public", - 'number_of_floating_ips_per_project': 1, + 'number_of_floating_ips_per_project': "1", 'vm_flavor': 'SCS-1L-1', 'vm_image': 'Ubuntu 24.04', - 'vm_volume_size_gb': 10, + 'vm_volume_size_gb': "10", 'cloud_init_extra_script': 
"""#!/bin/bash\necho "HELLO WORLD"; date > READY; whoami >> READY""", - 'wait_for_server_timeout': 300, + 'wait_for_server_timeout': "300", } - _file: str = None + _file: str | None = None @staticmethod - def get(key: str, regex: str = ".+", - multi_line: bool = False, parent_key: str | None = None, default: str | list[str] | None = None) \ - -> str | list[ str]: - lines = [default] - try: - if parent_key: - lines = str(Config._config[parent_key][key]).splitlines() - else: - lines = str(Config._config[key]).splitlines() - except KeyError: - LOGGER.info(f"config does not contain : {parent_key or 'ROOT'} -> {key}, using >>>{default}<<<") - if lines is None: - sys.exit(1) - - if len(lines) > 1 and multi_line is False: - LOGGER.error(f"{key}='{Config._config[key]}' contains multiple lines") + def get(key: str, regex: str = ".+", multi_line: bool = False) -> str: + if key not in Config._config: + LOGGER.error(f"{key} not in config") sys.exit(1) - for line in lines: + values: list[str] = [] + if multi_line: + values = str(Config._config[key]).splitlines() + else: + values.append(str(Config._config[key])) + + for value in values: matcher = re.compile(regex, re.MULTILINE | re.DOTALL) - if not matcher.fullmatch(str(line)): - LOGGER.error(f"{key} : >>>{line}<<< : does not match to regex >>>{regex}<<<") + if not matcher.fullmatch(value): + LOGGER.error(f"{key} : >>>{value}<<< : does not match to regex >>>{regex}<<<") sys.exit(1) - if not multi_line: - return str(lines[0]) + if len(values) > 1: + return "\n".join(values) else: - return [str(val) for val in lines] + return values[0] @staticmethod def load_config(config_file: str): - potential_profile_file = \ - str(os.path.realpath( - os.path.dirname(os.path.realpath(__file__))) + f"/../../../profiles/{config_file}") - - if os.path.exists(config_file): - Config._file = config_file - elif not str(config_file).startswith("/") and os.path.exists(potential_profile_file): - Config._file = potential_profile_file - else: - 
LOGGER.error(f"Cannot find a profile at {config_file} or {potential_profile_file}") - sys.exit(1) - - Config._file = os.path.realpath(Config._file) - - try: - LOGGER.info(f"Reading {Config._file}") - with open(str(Config._file), 'r') as file_fd: - Config._config.update(yaml.safe_load(file_fd)) + potential_profile_file = \ + str(os.path.realpath( + os.path.dirname(os.path.realpath(__file__))) + f"/../../../profiles/{config_file}") + + if os.path.exists(config_file): + Config._file = config_file + elif not str(config_file).startswith("/") and os.path.exists(potential_profile_file): + Config._file = potential_profile_file + else: + LOGGER.error(f"Cannot find a profile at {config_file} or {potential_profile_file}") + sys.exit(1) - except Exception as e: - LOGGER.error(f"Unable to read configuration: {e}") - sys.exit(1) + Config._file = os.path.realpath(Config._file) + try: + LOGGER.info(f"Reading {Config._file}") + with open(str(Config._file), 'r') as file_fd: + Config._config.update(yaml.safe_load(file_fd)) + except Exception as e: + LOGGER.error(f"Unable to read configuration: {e}") + sys.exit(1) @staticmethod def check_config(cls): @@ -100,7 +91,7 @@ def check_config(cls): if quota_type not in Config._config: continue for key_name, value in Config._config[quota_type].keys(): - Config.quota(key_name,quota_type,"1") + Config.quota(key_name, quota_type, 1) @staticmethod def show_effective_config(): @@ -109,6 +100,7 @@ def show_effective_config(): "The effective configuration from %s : \n>>>\n%s\n<<<" % ( Config._file, pformat(Config._config, indent=2, compact=False)) ) + @staticmethod def get_public_network() -> str: return Config.get("public_network", "[a-zA-Z][a-zA-Z0-9]*") @@ -151,15 +143,32 @@ def get_project_ipv4_subnet() -> str: @staticmethod def get_admin_vm_ssh_key() -> str: - return "\n".join(Config.get("admin_vm_ssh_key", r"ssh-\S+\s\S+\s\S+", multi_line=True)) + return Config.get("admin_vm_ssh_key", r"ssh-\S+\s\S+\s\S+", multi_line=True) @staticmethod def 
get_admin_domain_password() -> str: return Config.get("admin_domain_password", regex=r".{5,}") @staticmethod - def quota(quota_name: str, quota_type: str, default_value: str) -> int: - return int(Config.get(quota_name, regex=f"\d+", parent_key=quota_type, default=default_value)) + def configured_quota_names(quota_category: str) -> list[str]: + if quota_category in Config._config: + value = Config._config[quota_category] + if isinstance(value, dict): + return list(value.keys()) + return [] + + @staticmethod + def quota(quota_name: str, quota_category: str, default_value: int) -> int: + if quota_category in Config._config: + value = Config._config[quota_category].get(quota_name, default_value) + if isinstance(value, int): + return value + else: + LOGGER.error(f"Quota {quota_category} -> {quota_name} is not an integer") + sys.exit(1) + else: + return default_value + class DomainCache: _domains: dict[str, str] = dict() @@ -187,7 +196,7 @@ def ident_by_id(project_id: str) -> str: return f"project '{project}' in {domain}" @staticmethod - def add(project_id: str, data: dict[str,str]): + def add(project_id: str, data: dict[str, str]): ProjectCache.PROJECT_CACHE[project_id] = data diff --git a/src/openstack_workload_generator/entities/machine.py b/src/openstack_workload_generator/entities/machine.py index 3e06e16..bb73ad1 100644 --- a/src/openstack_workload_generator/entities/machine.py +++ b/src/openstack_workload_generator/entities/machine.py @@ -10,6 +10,7 @@ LOGGER = logging.getLogger() + class WorkloadGeneratorMachine: def __init__(self, conn: Connection, project: Project, machine_name: str, @@ -55,7 +56,8 @@ def wait_for_delete(self): def create_or_get_server(self, network: Network): if self.obj: - LOGGER.info(f"Server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.obj.project_id)} already exists") + LOGGER.info( + f"Server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(self.obj.project_id)} already exists") return # 
https://docs.openstack.org/openstacksdk/latest/user/resources/compute/v2/server.html#openstack.compute.v2.server.Server @@ -80,7 +82,10 @@ def create_or_get_server(self, network: Network): ], key_name=Config.get_admin_vm_ssh_keypair_name(), ) - LOGGER.info(f"Created server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(network.project_id)}") + if self.obj: + LOGGER.info(f"Created server {self.obj.name}/{self.obj.id} in {ProjectCache.ident_by_id(network.project_id)}") + else: + raise RuntimeError(f"Unable to create server {self.machine_name} in {ProjectCache.ident_by_id(network.project_id)}") @staticmethod def _get_user_script() -> str: diff --git a/src/openstack_workload_generator/entities/network.py b/src/openstack_workload_generator/entities/network.py index fa21498..9abd801 100644 --- a/src/openstack_workload_generator/entities/network.py +++ b/src/openstack_workload_generator/entities/network.py @@ -12,6 +12,7 @@ LOGGER = logging.getLogger() + class WorkloadGeneratorNetwork: def __init__(self, conn: Connection, project: Project, @@ -43,7 +44,7 @@ def _find_security_group(name, conn: Connection, project: Project) -> SecurityGr return None @staticmethod - def _find_router(name, conn: Connection, project: Project) -> Network | None: + def _find_router(name, conn: Connection, project: Project) -> Router | None: routers = [router for router in conn.network.routers(name=name, project_id=project.id)] if len(routers) == 0: return None @@ -63,7 +64,7 @@ def _find_network(name, conn: Connection, project: Project) -> Network | None: raise RuntimeError(f"More the one network with the name {name} in {project.name}") @staticmethod - def _find_subnet(name, conn, project) -> Network | None: + def _find_subnet(name, conn, project) -> Subnet | None: subnet = [network for network in conn.network.subnets(name=name, project_id=project.id)] if len(subnet) == 0: return None @@ -94,6 +95,9 @@ def create_and_get_router(self, subnet: Subnet) -> Router | None: 
name=self.router_name, admin_state_up=True ) + if not self.obj_router: + raise RuntimeError(f"Unable to create Router '{self.router_name}'") + LOGGER.info(f"Router '{self.obj_router.name}' created with ID: {self.obj_router.id}") self.conn.network.update_router(self.obj_router, external_gateway_info={ 'network_id': public_network.id @@ -113,6 +117,9 @@ def create_and_get_network(self) -> Network: project_id=self.project.id, mtu=1342 ) + if not self.obj_network: + raise RuntimeError(f"Unable to create network {self.network_name}") + LOGGER.info( f"Created network {self.obj_network.name}/{self.obj_network.id} in {self.project.name}/{self.project.id}") return self.obj_network @@ -121,6 +128,9 @@ def create_and_get_subnet(self) -> Subnet: if self.obj_subnet: return self.obj_subnet + if not self.obj_network: + raise RuntimeError("No network object exists") + self.obj_subnet = self.conn.network.create_subnet( network_id=self.obj_network.id, project_id=self.project.id, @@ -130,6 +140,10 @@ def create_and_get_subnet(self) -> Subnet: enable_dhcp=True, dns_nameservers=["8.8.8.8", "9.9.9.9"] ) + + if not self.obj_subnet: + raise RuntimeError(f"No subnet created {self.network_name}") + LOGGER.info( f"Created subnet {self.obj_subnet.name}/{self.obj_subnet.id} in {self.project.name}/{self.project.id}") @@ -183,6 +197,10 @@ def create_and_get_ingress_security_group(self) -> SecurityGroup: name=self.security_group_name_ingress, description="Security group to allow SSH access to instances" ) + + if not self.obj_ingress_security_group: + raise RuntimeError("No ingress security group was created") + self.conn.network.create_security_group_rule( security_group_id=self.obj_ingress_security_group.id, direction='ingress', @@ -212,6 +230,10 @@ def create_and_get_egress_security_group(self) -> SecurityGroup: name=self.security_group_name_egress, description="Security group to allow outgoing access" ) + + if not self.obj_egress_security_group: + raise RuntimeError("No egress security group 
was created") + self.conn.network.create_security_group_rule( security_group_id=self.obj_egress_security_group.id, direction='egress', diff --git a/src/openstack_workload_generator/entities/project.py b/src/openstack_workload_generator/entities/project.py index 060b053..f9001fe 100644 --- a/src/openstack_workload_generator/entities/project.py +++ b/src/openstack_workload_generator/entities/project.py @@ -10,15 +10,16 @@ from .helpers import ProjectCache, Config from .machine import WorkloadGeneratorMachine -from .user import WorkloadGeneratorTestUser +from .user import WorkloadGeneratorUser from .network import WorkloadGeneratorNetwork LOGGER = logging.getLogger() + class WorkloadGeneratorProject: def __init__(self, admin_conn: Connection, project_name: str, domain: Domain, - user: WorkloadGeneratorTestUser): + user: WorkloadGeneratorUser): self._admin_conn: Connection = admin_conn self._project_conn: Connection | None = None self.project_name: str = project_name @@ -26,7 +27,7 @@ def __init__(self, admin_conn: Connection, project_name: str, domain: Domain, self.security_group_name_egress: str = f"egress-any-{project_name}" self.domain: Domain = domain self.ssh_proxy_jump: str | None = None - self.user: WorkloadGeneratorTestUser = user + self.user: WorkloadGeneratorUser = user self.obj: Project = self._admin_conn.identity.find_project(project_name, domain_id=self.domain.id) if self.obj: ProjectCache.add(self.obj.id, {"name": self.obj.name, "domain_id": self.domain.id}) @@ -54,6 +55,8 @@ def project_conn(self) -> Connection: username=self.user.user_name, password=self.user.user_password, ) + if not self._project_conn: + raise RuntimeError(f"Unable to create a project connection {ProjectCache.ident_by_id(self.obj.id)}") return self._project_conn @staticmethod @@ -70,23 +73,25 @@ def _get_machines(conn: Connection, obj: Project, security_group_name_ingress: str, security_group_name_egress: str, ) -> dict[str, WorkloadGeneratorMachine]: - result = dict() + result: 
dict[str, WorkloadGeneratorMachine] = dict() if not obj: return result for server in conn.compute.servers(all_projects=True, project_id=obj.id): workload_server = WorkloadGeneratorMachine(conn, obj, server.name, security_group_name_ingress, - security_group_name_egress) + security_group_name_egress) workload_server.obj = server result[workload_server.machine_name] = workload_server return result def get_machines(self, machines: list[str]) -> list[WorkloadGeneratorMachine]: + result: list[WorkloadGeneratorMachine] = [] if self.obj is None: - return [] + return result for machine in machines: if machine in self.workload_machines: - yield self.workload_machines[machine] + result.append(self.workload_machines[machine]) + return result def get_role_id_by_name(self, role_name) -> str: for role in self._admin_conn.identity.roles(): @@ -105,37 +110,37 @@ def assign_role_to_global_admin_for_project(self, role_name: str): user=user_id, project=self.obj.id, role=self.get_role_id_by_name(role_name)) LOGGER.info(f"Assigned global admin {role_name} to {user_id} for {ProjectCache.ident_by_id(self.obj.id)}") - def _set_quota(self, quota_type: str): - if quota_type == "compute_quotas": + def _set_quota(self, quota_category: str): + if quota_category == "compute_quotas": api_area = "compute" current_quota = self._admin_conn.compute.get_quota_set(self.obj.id) - elif quota_type == "block_storage_quotas": + elif quota_category == "block_storage_quotas": api_area = "volume" current_quota = self._admin_conn.volume.get_quota_set(self.obj.id) - elif quota_type == "network_quotas": + elif quota_category == "network_quotas": api_area = "network" current_quota = self._admin_conn.get_network_quotas(self.obj.id) else: - raise RuntimeError(f"Not implemented: {quota_type}") + raise RuntimeError(f"Not implemented: {quota_category}") # service_obj = getattr(self._admin_conn, api_area) # current_quota = service_obj.get_quota_set(self.obj.id) - LOGGER.debug(f"current quotas for {quota_type} : 
{current_quota}") + LOGGER.debug(f"current quotas for {quota_category} : {current_quota}") new_quota = {} - if quota_type in Config._config: - for key_name in Config._config[quota_type].keys(): - try: - current_value = getattr(current_quota, key_name) - except AttributeError: - LOGGER.error(f"No such {api_area} quota field {key_name} in {current_quota}") - sys.exit() - - new_value = Config.quota(key_name, quota_type, str(getattr(current_quota, key_name))) - if current_value != new_value: - LOGGER.info(f"New {api_area} quota for {ProjectCache.ident_by_id(self.obj.id)}" - f": {key_name} : {current_value} -> {new_value}") - new_quota[key_name] = new_value + for key_name in Config.configured_quota_names(quota_category): + try: + current_value = getattr(current_quota, key_name) + except AttributeError: + LOGGER.error(f"No such {api_area} quota field {key_name} in {current_quota}") + sys.exit() + + new_value = Config.quota(key_name, quota_category, getattr(current_quota, key_name)) + if current_value != new_value: + LOGGER.info(f"New {api_area} quota for {ProjectCache.ident_by_id(self.obj.id)}" + f": {key_name} : {current_value} -> {new_value}") + new_quota[key_name] = new_value + if len(new_quota): set_quota_method = getattr(self._admin_conn, f"set_{api_area}_quotas") set_quota_method(self.obj.id, **new_quota) @@ -151,8 +156,9 @@ def adapt_quota(self): def create_and_get_project(self) -> Project: if self.obj: self.adapt_quota() - self.workload_network = WorkloadGeneratorNetwork(self._admin_conn, self.obj, self.security_group_name_ingress, - self.security_group_name_egress) + self.workload_network = WorkloadGeneratorNetwork(self._admin_conn, self.obj, + self.security_group_name_ingress, + self.security_group_name_egress) self.workload_network.create_and_get_network_setup() return self.obj @@ -171,7 +177,7 @@ def create_and_get_project(self) -> Project: self.assign_role_to_user_for_project("member") self.workload_network = WorkloadGeneratorNetwork(self.project_conn, 
self.obj, self.security_group_name_ingress, - self.security_group_name_egress) + self.security_group_name_egress) self.workload_network.create_and_get_network_setup() return self.obj @@ -221,6 +227,10 @@ def get_and_create_machines(self, machines: list[str]): if machine_name not in self.workload_machines: machine = WorkloadGeneratorMachine(self.project_conn, self.obj, machine_name, self.security_group_name_ingress, self.security_group_name_egress) + + if self.workload_network is None or self.workload_network.obj_network is None: + raise RuntimeError("No Workload network object") + machine.create_or_get_server(self.workload_network.obj_network) if machine.floating_ip: @@ -236,7 +246,9 @@ def get_and_create_machines(self, machines: list[str]): self.close_connection() def dump_inventory_hosts(self, directory_location: str): - for workload_machine in self.workload_machines.values(): + for name, workload_machine in self.workload_machines.items(): + if workload_machine.obj is None: + raise RuntimeError(f"Invalid reference to server for {workload_machine.machine_name}") workload_machine.update_assigned_ips() data = { @@ -263,7 +275,8 @@ def dump_inventory_hosts(self, directory_location: str): def get_or_create_ssh_key(self): self.ssh_key = self.project_conn.compute.find_keypair(Config.get_admin_vm_ssh_keypair_name()) if not self.ssh_key: - LOGGER.info(f"Create SSH keypair '{Config.get_admin_vm_ssh_keypair_name()} in {ProjectCache.ident_by_id(self.obj.id)}") + LOGGER.info( + f"Create SSH keypair '{Config.get_admin_vm_ssh_keypair_name()} in {ProjectCache.ident_by_id(self.obj.id)}") self.ssh_key = self.project_conn.compute.create_keypair( name=Config.get_admin_vm_ssh_keypair_name(), public_key=Config.get_admin_vm_ssh_key(), diff --git a/src/openstack_workload_generator/entities/user.py b/src/openstack_workload_generator/entities/user.py index 7275cf4..617abf4 100644 --- a/src/openstack_workload_generator/entities/user.py +++ 
b/src/openstack_workload_generator/entities/user.py @@ -4,11 +4,12 @@ from openstack.identity.v3.domain import Domain from openstack.identity.v3.user import User -from .helpers import Config,DomainCache +from .helpers import Config, DomainCache LOGGER = logging.getLogger() -class WorkloadGeneratorTestUser: + +class WorkloadGeneratorUser: def __init__(self, conn: Connection, user_name: str, domain: Domain): self.conn = conn @@ -19,7 +20,8 @@ def __init__(self, conn: Connection, user_name: str, domain: Domain): def assign_role_to_user(self, role_name: str): self.conn.identity.assign_project_role_to_user(self.obj.id, self.domain.id, self.get_role_id_by_name(role_name)) - LOGGER.info(f"Assigned role '{role_name}' to user '{self.obj.name}' in {DomainCache.ident_by_id(self.domain.id)}") + LOGGER.info( + f"Assigned role '{role_name}' to user '{self.obj.name}' in {DomainCache.ident_by_id(self.domain.id)}") def create_and_get_user(self) -> User: diff --git a/test/test_fake.py b/test/test_fake.py new file mode 100644 index 0000000..3c1c023 --- /dev/null +++ b/test/test_fake.py @@ -0,0 +1,4 @@ + + +def test_fake(): + assert 2 > 0