From e2cd4bd90629c00fabf8463f77908203a2a0478e Mon Sep 17 00:00:00 2001 From: Matthias De Block Date: Wed, 27 Sep 2017 12:16:31 +0200 Subject: [PATCH 01/28] Update settings.json --- packaging/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/settings.json b/packaging/settings.json index 8939cda..595a226 100644 --- a/packaging/settings.json +++ b/packaging/settings.json @@ -5,6 +5,6 @@ "source_contents": "--transform 's,^,{0}-{1}/,' helpers remove setup validate __init__.py *.txt", "version": { "major": 0, - "minor": 4 + "minor": 5 } } From b8bd393d98ee48011c03d8008a54c680f8bd8706 Mon Sep 17 00:00:00 2001 From: simon Date: Fri, 3 Nov 2017 13:55:11 +0100 Subject: [PATCH 02/28] Integration testing of concurrent addition and removal of vpools --- helpers/api.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/helpers/api.py b/helpers/api.py index 59e5b69..7b7214b 100644 --- a/helpers/api.py +++ b/helpers/api.py @@ -286,8 +286,6 @@ def _to_json(dict_or_json): :return: json data :rtype: string """ - try: - json_object = json.loads(str(dict_or_json)) - except ValueError: + if isinstance(dict_or_json, dict): return json.dumps(dict_or_json) - return json_object + return dict_or_json From 5d8ca13466a7ddae74aeb0b84476bb67b0dbccd3 Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 16 Nov 2017 15:39:07 +0100 Subject: [PATCH 03/28] changes integration tests -api usage changes -bugfixes --- docs/guide.md | 38 +++++-- helpers/albanode.py | 12 +-- helpers/api.py | 11 +- helpers/backend.py | 22 ++-- helpers/disk.py | 28 ++--- helpers/fstab.py | 126 +++++++++++------------ helpers/storagerouter.py | 134 ++++++------------------ helpers/vdisk.py | 23 ++--- remove/backend.py | 217 +++++++++++++++++++++++++++++++-------- remove/roles.py | 23 +++-- remove/vdisk.py | 47 ++++----- remove/vpool.py | 14 ++- setup/backend.py | 112 ++++++++------------ setup/domain.py | 38 +++---- setup/proxy.py | 1 + setup/roles.py | 37 +++---- 
setup/vdisk.py | 82 ++++++--------- setup/vpool.py | 17 ++- validate/roles.py | 14 ++- 19 files changed, 508 insertions(+), 488 deletions(-) diff --git a/docs/guide.md b/docs/guide.md index 0d2c301..76fa3f5 100644 --- a/docs/guide.md +++ b/docs/guide.md @@ -3,7 +3,6 @@ ## Description This repository contains the automation library for Open vStorage. -This library delegates component creation/removal to the REST API of Open vStorage through Python code. ## System requirements @@ -26,17 +25,36 @@ This library delegates component creation/removal to the REST API of Open vStora - Automation library HELPERS logging file `/var/log/ovs/helpers.log` ## Sections +### Api Library +This library delegates component creation/removal to the REST API of Open vStorage through Python code. + +#### Helpers section +Contains functions to assist in removal, setup and validation of components such as backends, disks, storagerouters and -drivers, as well as gathering of metadata etc. + +#### Remove section +Contains functions for removal of arakoon clusters, backends, roles, vDisks and vPools from Open vStorage. + +#### Setup section +Contains functions to set up new arakoon clusters, backends, domains, proxies, roles, vDisks and vPools in Open vStorage. -### Helpers section -Contains helping function that provide required meta information during setup, removal or validation +#### Validation section +Contains function to validate functionality of Open vStorage components. +This includes decorators for checking prerequisites of functions throughout the package. 
-### Remove section -Contains removal functions that makes it possible to remove components from Open vStorage +###Scenario helpers section +Classes in this section are used to execute the actual tests (referred to as[scenarios](#header_scenarios)) -### Setup section -Contains setup functions that makes it possible to add components to Open vStorage +###Scenarios section +This section contains code for testing a variety of integration scenarios.\ +Currently present tests: +- api checkup post-reboot +- several arakoon related checks +- addition and removal of + - backends + - storagerouters + - vDisks, vMachines and vPools +- health checks +- installation tests +- hypervisor tests -### Validation section -Provides validation for setup or removal of Open vStorage components. -E.g. when a vPool is added, the required components are checked if they are present diff --git a/helpers/albanode.py b/helpers/albanode.py index cfd0472..8ed76a0 100644 --- a/helpers/albanode.py +++ b/helpers/albanode.py @@ -13,12 +13,13 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+from ci.scenario_helpers.ci_constants import CIConstants from ovs.dal.hybrids.albanode import AlbaNode from ovs.dal.lists.albanodelist import AlbaNodeList from ovs.extensions.generic.logger import Logger -class AlbaNodeHelper(object): +class AlbaNodeHelper(CIConstants): """ Alba node helper class """ @@ -26,19 +27,16 @@ class AlbaNodeHelper(object): LOGGER = Logger('helpers-ci_albanode') IGNORE_KEYS = ('_error', '_duration', '_version', '_success') - @staticmethod - def _map_alba_nodes(api): + @classmethod + def _map_alba_nodes(cls): """ Will map the alba_node_id with its guid counterpart and return the map dict - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient """ mapping = {} - options = { 'contents': 'node_id,_relations', } - response = api.get( + response = cls.api.get( api='alba/nodes', params=options ) diff --git a/helpers/api.py b/helpers/api.py index 7b7214b..434b3f4 100644 --- a/helpers/api.py +++ b/helpers/api.py @@ -72,6 +72,7 @@ class OVSClient(object): disable_warnings(InsecureRequestWarning) disable_warnings(SNIMissingWarning) + def __init__(self, ip, username, password, verify=False, version='*', port=None, raw_response=False): """ Initializes the object with credentials and connection information @@ -185,6 +186,7 @@ def _process(self, response, overrule_raw=False): else: raise HttpException(status_code, message) + def _call(self, api, params, func, **kwargs): if not api.endswith('/'): api = '{0}/'.format(api) @@ -193,17 +195,17 @@ def _call(self, api, params, func, **kwargs): if self._volatile_client is not None: self._token = self._volatile_client.get(self._key) first_connect = self._token is None - headers, url = self._prepare(params=params) + headers, _url = self._prepare(params=params) try: - return self._process(func(url=url.format(api), headers=headers, verify=self._verify, **kwargs)) + return self._process(func(url=_url.format(api), headers=headers, verify=self._verify, **kwargs)) except 
ForbiddenException: if self._volatile_client is not None: self._volatile_client.delete(self._key) if first_connect is True: # First connect, so no token was present yet, so no need to try twice without token raise self._token = None - headers, url = self._prepare(params=params) - return self._process(func(url=url.format(api), headers=headers, verify=self._verify, **kwargs)) + headers, _url = self._prepare(params=params) + return self._process(func(url=_url.format(api), headers=headers, verify=self._verify, **kwargs)) except Exception: if self._volatile_client is not None: self._volatile_client.delete(self._key) @@ -264,7 +266,6 @@ def wait_for_task(self, task_id, timeout=None): if timeout is not None and timeout < (time.time() - start): raise TimeOutError('Waiting for task {0} has timed out.'.format(task_id)) task_metadata = self.get('/tasks/{0}/'.format(task_id)) - print task_metadata finished = task_metadata['status'] in ('FAILURE', 'SUCCESS') if finished is False: if task_metadata != previous_metadata: diff --git a/helpers/backend.py b/helpers/backend.py index 2ac8e43..b57378e 100644 --- a/helpers/backend.py +++ b/helpers/backend.py @@ -13,6 +13,8 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+ +from ci.scenario_helpers.ci_constants import CIConstants from ovs.dal.lists.albabackendlist import AlbaBackendList from ovs.dal.lists.backendlist import BackendList from ovs.dal.lists.backendtypelist import BackendTypeList @@ -21,7 +23,7 @@ from ..helpers.exceptions import PresetNotFoundError, AlbaBackendNotFoundError -class BackendHelper(object): +class BackendHelper(CIConstants): """ BackendHelper class """ @@ -127,22 +129,20 @@ def get_albabackend_by_name(albabackend_name): BackendHelper.LOGGER.error(error_msg) raise NameError(error_msg) - @staticmethod - def get_asd_safety(albabackend_guid, asd_id, api): + @classmethod + def get_asd_safety(cls, albabackend_guid, asd_id): """ Request the calculation of the disk safety :param albabackend_guid: guid of the alba backend :type albabackend_guid: str :param asd_id: id of the asd :type asd_id: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :return: asd safety :rtype: dict """ params = {'asd_id': asd_id} - task_guid = api.get('alba/backends/{0}/calculate_safety'.format(albabackend_guid), params=params) - result = api.wait_for_task(task_id=task_guid, timeout=30) + task_guid = cls.api.get('alba/backends/{0}/calculate_safety'.format(albabackend_guid), params=params) + result = cls.api.wait_for_task(task_id=task_guid, timeout=30) if result[0] is False: errormsg = "Calculate safety for '{0}' failed with '{1}'".format(asd_id, result[1]) @@ -150,20 +150,18 @@ def get_asd_safety(albabackend_guid, asd_id, api): raise RuntimeError(errormsg) return result[1] - @staticmethod - def get_backend_local_stack(albabackend_name, api): + @classmethod + def get_backend_local_stack(cls, albabackend_name): """ Fetches the local stack property of a backend :param albabackend_name: backend name :type albabackend_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient """ options = { 'contents': 'local_stack', } - return 
api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), + return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), params={'queryparams': options} ) diff --git a/helpers/disk.py b/helpers/disk.py index d6db106..e215429 100644 --- a/helpers/disk.py +++ b/helpers/disk.py @@ -18,13 +18,14 @@ from ovs.dal.lists.diskpartitionlist import DiskPartitionList from ..helpers.storagerouter import StoragerouterHelper - class DiskHelper(object): """ DiskHelper class """ def __init__(self): + from ..helpers.storagerouter import StoragerouterHelper + pass @staticmethod @@ -41,7 +42,7 @@ def get_diskpartitions_by_guid(diskguid): return [dp for dp in DiskPartitionList.get_partitions() if dp.disk_guid == diskguid] @staticmethod - def get_roles_from_disks(storagerouter_ip=None): + def get_roles_from_disks(storagerouter_guid=None): """ Fetch disk roles from all disks with optional storagerouter_ip @@ -50,45 +51,44 @@ def get_roles_from_disks(storagerouter_ip=None): :return: list of lists with roles :rtype: list > list """ - if not storagerouter_ip: + if not storagerouter_guid: return [partition.roles for disk in DiskList.get_disks() for partition in disk.partitions] else: - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip) return [partition.roles for disk in DiskList.get_disks() if disk.storagerouter_guid == storagerouter_guid for partition in disk.partitions] @staticmethod - def get_disk_by_diskname(storagerouter_ip, disk_name): + def get_disk_by_diskname(storagerouter_guid, disk_name): """ - Get a disk object by storagerouter ip and disk name + Get a disk object by storagerouter guid and disk name - :param storagerouter_ip: ip address of a storage router - :type storagerouter_ip: str + :param storagerouter_guid: guid address of a storage router + :type storagerouter_guid: str :param disk_name: name of a disk (e.g. 
sda) :type disk_name: str :return: disk object :rtype: ovs.dal.hybrids.Disk """ - storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip=storagerouter_ip) + storagerouter = StoragerouterHelper.get_storagerouter_by_guid(storagerouter_guid=storagerouter_guid) for disk in storagerouter.disks: if disk.name == disk_name: return disk @staticmethod - def get_roles_from_disk(storagerouter_ip, disk_name): + def get_roles_from_disk(storagerouter_guid, disk_name): """ Get the roles from a certain disk - :param storagerouter_ip: ip address of a storage router - :type storagerouter_ip: str + :param storagerouter_guid: guid address of a storage router + :type storagerouter_guid: str :param disk_name: name of a disk (e.g. sda) :type disk_name: str :return: list of roles of all partitions on a certain disk :rtype: list """ - disk = DiskHelper.get_disk_by_diskname(storagerouter_ip, disk_name) + disk = DiskHelper.get_disk_by_diskname(storagerouter_guid, disk_name) roles_on_disk = [] if disk: for diskpartition in disk.partitions: @@ -96,4 +96,4 @@ def get_roles_from_disk(storagerouter_ip, disk_name): roles_on_disk.append(role) return roles_on_disk else: - raise RuntimeError("Disk with name `{0}` not found on storagerouter `{1}`".format(disk_name, storagerouter_ip)) + raise RuntimeError("Disk with name `{0}` not found on storagerouter `{1}`".format(disk_name, storagerouter_guid)) diff --git a/helpers/fstab.py b/helpers/fstab.py index 8e6765a..efff943 100644 --- a/helpers/fstab.py +++ b/helpers/fstab.py @@ -13,15 +13,16 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+import inspect - -class FstabHelper(file): +class FstabHelper(): """ Class to help with Fstab manipulations Inherits from file class """ import os + class Entry(object): """ Entry class represents a non-comment line on the `/etc/fstab` file @@ -45,14 +46,36 @@ def __eq__(self, o): def __str__(self): return "{} {} {} {} {} {}".format(self.device, self.mountpoint, self.filesystem, self.options, self.d, self.p) + def get(self, item): + if not isinstance(item,basestring): + raise ValueError('Specified parameter {0} must be a string') + item = item.lower() + if item in self.__dict__.keys(): + if item == 'device': + return self.device + elif item == 'mountpoint': + return self.mountpoint + elif item == 'options': + return self.options + elif item == 'd': + return self.d + elif item == 'p': + return self.p + else: + return None + else: + raise ValueError('Specified parameter {0} not an attribute of Entry class.'.format(item)) + DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - def __init__(self, path=None): + _path = DEFAULT_PATH + + def __init__(cls, path=None): if path: - self._path = path + cls._path = path else: - self._path = self.DEFAULT_PATH - file.__init__(self._path, 'r+') + cls._path = cls.DEFAULT_PATH + @staticmethod def _hydrate_entry(line): @@ -62,88 +85,60 @@ def _hydrate_entry(line): :type line: str :return: """ - return FstabHelper.Entry(*filter(lambda x: x not in ('', None), line.strip("\n").split(" "))) + return FstabHelper.Entry(*filter(lambda x: x not in ('',' ', None), str(line).strip("\n").split(" "))) - @property - def entries(self): - """ - Property containing all non-comment entries - :return: - """ - self.seek(0) - for line in self.readlines(): - try: - if not line.startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): + @classmethod + def get_entry_by_attr(cls, attr, value): """ Returns an entry with where a attr has a specific value :param attr: attribute from the 
entry :param value: value that the attribute should have :return: """ - for entry in self.entries: - e_attr = getattr(entry, attr) + + entries = [] + with open(cls._path, 'r') as fh: + for line in fh: + try: + if not line.startswith("#") and line.strip() is not '': + entries.append(cls._hydrate_entry(line)) + except ValueError: + pass + + for entry in entries: + e_attr = entry.get(attr) if e_attr == value: return entry return None - def add_entry(self, entry): - """ - Adds an entry in fstab - :param entry: entry object to add to fstab - :return: - """ - if self.get_entry_by_attr('device', entry.device): - return False - - self.write(str(entry) + '\n') - self.truncate() - return entry - - def remove_entry(self, entry): + @classmethod + def remove_entry(cls, entry): """ Removes a line from fstab :param entry:entry object :return: """ - self.seek(0) - - lines = self.readlines() - - found = False - line = None - for index, line in enumerate(lines): - if not line.startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - break - - if not found: - return False - if line is not None: - lines.remove(line) - - self.seek(0) - self.write(''.join(lines)) - self.truncate() - return True + with open(cls._path, 'r+') as fh: + d = fh.readlines() + fh.seek(0) + for line in d: + if line.strip() != entry and not line.startswith('#'): + fh.write(line) + fh.truncate() - def remove_by_mountpoint(self, mountpoint): + @classmethod + def remove_by_mountpoint(cls, mountpoint): """ Removes an entry by specific mountpoint :param mountpoint: mountpoint :return: """ - entry = self.get_entry_by_attr('mountpoint', mountpoint) + entry = cls.get_entry_by_attr('mountpoint', mountpoint) if entry: - return self.remove_entry(entry) - return False + cls.remove_entry(entry) - def add(self, device, mountpoint, filesystem, options=None, dump=None, pass_=None): + @classmethod + def add(cls, device, mountpoint, filesystem, options=None, dump=None, pass_=None): """ Adds a entry based on 
supplied params :param device: devicename eg /dev/sda @@ -154,4 +149,5 @@ def add(self, device, mountpoint, filesystem, options=None, dump=None, pass_=Non :param pass_: order to check filesystem at reboot time :return: """ - return self.add_entry(FstabHelper.Entry(device, mountpoint, filesystem, options, dump)) + with open(cls._path, 'a+') as fh: + fh.write(str(FstabHelper.Entry(device, mountpoint, filesystem, options, dump))+'\n') diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py index 65b43f4..3278d0d 100644 --- a/helpers/storagerouter.py +++ b/helpers/storagerouter.py @@ -13,13 +13,14 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. + +from ci.scenario_helpers.ci_constants import CIConstants from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.extensions.generic.logger import Logger -from ovs.extensions.generic.system import System -class StoragerouterHelper(object): +class StoragerouterHelper(CIConstants): """ StoragerouterHelper class @@ -42,77 +43,51 @@ def get_storagerouter_by_guid(storagerouter_guid): """ return StorageRouter(storagerouter_guid) - @staticmethod - def get_storagerouter_guid_by_ip(storagerouter_ip): - """ - - :param storagerouter_ip: ip of a storagerouter - :type storagerouter_ip: str - :return: storagerouter guid - :rtype: str - """ - return StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid - @staticmethod def get_storagerouter_by_ip(storagerouter_ip): """ - :param storagerouter_ip: ip of a storagerouter :type storagerouter_ip: str - :return: storagerouter object + :return: storagerouter :rtype: ovs.dal.hybrids.storagerouter.StorageRouter """ return StorageRouterList.get_by_ip(storagerouter_ip) @staticmethod - def get_disks_by_ip(storagerouter_ip): + def get_storagerouter_ip(storagerouter_guid): """ - - :param storagerouter_ip: - :type storagerouter_ip: str - :return: disks 
found for the storagerouter ip - :rtype: list of + :param storagerouter_guid: guid of a storagerouter + :type storagerouter_guid: str + :return: storagerouter ip + :rtype: str """ - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip) - return StorageRouter(storagerouter_guid).disks + return StorageRouter(storagerouter_guid).ip @staticmethod - def get_disk_by_ip(ip, diskname): + def get_disk_by_guid(guid, diskname): """ Fetch a disk by its ip and name - :param ip: ip address of a storagerouter + :param guid: guid of a storagerouter + :type guid: str :param diskname: shortname of a disk (e.g. sdb) :return: Disk Object :rtype: ovs.dal.hybrids.disk.disk """ - - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip) - disks = StorageRouter(storagerouter_guid).disks + disks = StoragerouterHelper.get_storagerouter_by_guid(guid).disks for d in disks: if d.name == diskname: return d - @staticmethod - def get_local_storagerouter(): - """ - Fetch the local storagerouter settings - - :return: StorageRouter Object - :rtype: ovs.dal.hybrids.storagerouter.StorageRouter - """ - - return System.get_my_storagerouter() - @staticmethod def get_storagerouter_ips(): - """ - Fetch all the ip addresses in this cluster + """ + Fetch all the ip addresses in this cluster - :return: list with storagerouter ips - :rtype: list - """ - return [storagerouter.ip for storagerouter in StorageRouterList.get_storagerouters()] + :return: list with storagerouter ips + :rtype: list + """ + return [storagerouter.ip for storagerouter in StorageRouterList.get_storagerouters()] @staticmethod def get_storagerouters(): @@ -122,72 +97,25 @@ def get_storagerouters(): :return: list with storagerouters :rtype: list """ - return StorageRouterList.get_storagerouters() - @staticmethod - def get_master_storagerouters(): + @classmethod + def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None): """ - Fetch the master storagerouters - - :return: list with 
master storagerouters - :rtype: list - """ - - return StorageRouterList.get_masters() - - @staticmethod - def get_master_storagerouter_ips(): - """ - Fetch the master storagerouters ips - - :return: list with master storagerouters ips - :rtype: list - """ - - return [storagerouter.ip for storagerouter in StorageRouterList.get_masters()] - - @staticmethod - def get_slave_storagerouters(): - """ - Fetch the slave storagerouters - - :return: list with slave storagerouters - :rtype: list - """ - - return StorageRouterList.get_slaves() - - @staticmethod - def get_slave_storagerouter_ips(): - """ - Fetch the slave storagerouters ips - - :return: list with slave storagerouters ips - :rtype: list - """ - - return [storagerouter.ip for storagerouter in StorageRouterList.get_slaves()] - - @staticmethod - def sync_disk_with_reality(api, guid=None, ip=None, timeout=None): - """ - - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param guid: guid of the storagerouter :type guid: str :param ip: ip of the storagerouter :type ip: str :param timeout: timeout time in seconds :type timeout: int - :return: """ - storagerouter_guid = guid - if ip is not None: - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip) - if storagerouter_guid is None: - raise ValueError('No guid or ip found.') + if guid is not None: + if ip is not None: + Logger.warning('Both storagerouter guid and ip passed, using guid for sync.') + storagerouter_guid = guid + elif ip is not None: + storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(ip).guid else: - task_id = api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid), data=None) - return api.wait_for_task(task_id=task_id, timeout=timeout) + raise ValueError('No guid or ip passed.') + task_id = cls.api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid), data=None) + return cls.api.wait_for_task(task_id=task_id, timeout=timeout) diff --git 
a/helpers/vdisk.py b/helpers/vdisk.py index 81dd640..c6fc8be 100644 --- a/helpers/vdisk.py +++ b/helpers/vdisk.py @@ -13,6 +13,7 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. +from ci.scenario_helpers.ci_constants import CIConstants from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.lists.vdisklist import VDiskList from ovs.dal.lists.vpoollist import VPoolList @@ -20,7 +21,7 @@ from ..helpers.exceptions import VPoolNotFoundError, VDiskNotFoundError -class VDiskHelper(object): +class VDiskHelper(CIConstants): """ vDiskHelper class """ @@ -105,8 +106,8 @@ def get_snapshot_by_guid(snapshot_guid, vdisk_name, vpool_name): raise RuntimeError("Did not find snapshot with guid `{0}` on vdisk `{1}` on vpool `{2}`" .format(snapshot_guid, vdisk_name, vpool_name)) - @staticmethod - def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIMEOUT): + + def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIMEOUT): """ Fetch the config parameters of a vDisk @@ -115,8 +116,6 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM :type vdisk_name: str :param vpool_name: name of a existing vpool :type vpool_name: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :return: a dict with config parameters, e.g. 
@@ -133,8 +132,8 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM """ vdisk = VDiskHelper.get_vdisk_by_name(vdisk_name=vdisk_name, vpool_name=vpool_name) - task_guid = api.get(api='/vdisks/{0}/get_config_params'.format(vdisk.guid)) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_guid = cls.api.get(api='/vdisks/{0}/get_config_params'.format(vdisk.guid)) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Setting config vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1]) @@ -144,23 +143,21 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM VDiskHelper.LOGGER.info("Setting config vDisk `{0}` should have succeeded".format(vdisk_name)) return task_result[1] - @staticmethod - def scrub_vdisk(vdisk_guid, api, timeout=15 * 60, wait=True): + + def scrub_vdisk(cls, vdisk_guid, timeout=15 * 60, wait=True): """ Scrub a specific vdisk :param vdisk_guid: guid of the vdisk to scrub :type vdisk_guid: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param wait: wait for task to finish or not :type wait: bool :return: """ - task_guid = api.post(api='/vdisks/{0}/scrub/'.format(vdisk_guid), data={}) + task_guid = cls.api.post(api='/vdisks/{0}/scrub/'.format(vdisk_guid), data={}) if wait is True: - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Scrubbing vDisk `{0}` has failed with error {1}".format(vdisk_guid, task_result[1]) VDiskHelper.LOGGER.error(error_msg) diff --git a/remove/backend.py b/remove/backend.py index 05d1808..ca63e78 100644 --- a/remove/backend.py +++ b/remove/backend.py @@ -13,29 +13,31 @@ # # Open vStorage is distributed in the hope that it 
will be useful, # but WITHOUT ANY WARRANTY of any kind. +from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.albanode import AlbaNodeHelper from ..helpers.backend import BackendHelper from ..validate.decorators import required_backend, required_preset -class BackendRemover(object): +class BackendRemover(CIConstants): LOGGER = Logger("remove-ci_backend_remover") REMOVE_ASD_TIMEOUT = 60 REMOVE_DISK_TIMEOUT = 60 REMOVE_BACKEND_TIMEOUT = 60 REMOVE_PRESET_TIMEOUT = 60 + UNLINK_BACKEND_TIMEOUT = 60 def __init__(self): pass - @staticmethod - def remove_claimed_disk(api): + @classmethod + def remove_claimed_disk(cls): pass - @staticmethod - def remove_asds(albabackend_name, target, disks, api): + @classmethod + def remove_asds(cls, albabackend_name, target, disks): """ Remove all asds from a backend @@ -43,8 +45,6 @@ def remove_asds(albabackend_name, target, disks, api): :type target: str :param disks: dict with diskname as key and amount of osds as value :type disks: dict - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param albabackend_name: Name of the AlbaBackend to configure :type albabackend_name: str :return: preset_name @@ -53,9 +53,9 @@ def remove_asds(albabackend_name, target, disks, api): albabackend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name) # target is a node - node_mapping = AlbaNodeHelper._map_alba_nodes(api) + node_mapping = AlbaNodeHelper._map_alba_nodes() - local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name, api=api) + local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name) for disk, amount_of_osds in disks.iteritems(): disk_object = AlbaNodeHelper.get_disk_by_ip(ip=target, diskname=disk) # Get the name of the disk out of the path, only expecting one with ata- @@ -64,15 +64,13 @@ def remove_asds(albabackend_name, target, disks, api): # Check if the 
alba_node_id has the disk if disk_path in local_stack['local_stack'][alba_node_id]: # Remove asds - if disk_path in local_stack['local_stack'][alba_node_id]: - for asd_id, asd_info in local_stack['local_stack'][alba_node_id][disk_path]['asds'].iteritems(): - BackendRemover.LOGGER.info('Removing asd {0} for disk {1}'.format(asd_id, local_stack['local_stack'][alba_node_id][disk_path]['guid'])) - asd_safety = BackendHelper.get_asd_safety(albabackend_guid=albabackend_guid, asd_id=asd_id, api=api) - BackendRemover._remove_asd(alba_node_guid=alba_node_guid, asd_id=asd_id, asd_safety=asd_safety, api=api) + for asd_id, asd_info in local_stack['local_stack'][alba_node_id][disk_path]['osds'].iteritems(): + BackendRemover.LOGGER.info('Removing asd {0} for disk {1}'.format(asd_id, disk_path)) + asd_safety = BackendHelper.get_asd_safety(albabackend_guid=albabackend_guid, asd_id=asd_id) + BackendRemover._remove_asd(alba_node_guid=alba_node_guid, asd_id=asd_id, asd_safety=asd_safety) # Restarting iteration to avoid too many local stack calls: - local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name, - api=api) + local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name) for disk, amount_of_osds in disks.iteritems(): disk_object = AlbaNodeHelper.get_disk_by_ip(ip=target, diskname=disk) # Get the name of the disk out of the path, only expecting one with ata- @@ -82,10 +80,10 @@ def remove_asds(albabackend_name, target, disks, api): if disk_path in local_stack['local_stack'][alba_node_id]: # Initialize disk: BackendRemover.LOGGER.info('Removing {0}.'.format(disk_path)) - BackendRemover._remove_disk(alba_node_guid=alba_node_guid, diskname=disk_path, api=api) + BackendRemover._remove_disk(alba_node_guid=alba_node_guid, diskname=disk_path) - @staticmethod - def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIMEOUT): + @classmethod + def _remove_asd(cls, alba_node_guid, asd_id, asd_safety, 
timeout=REMOVE_ASD_TIMEOUT): """ Remove a asd from a backend @@ -95,8 +93,6 @@ def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIME :type asd_id: str :param asd_safety: :type asd_safety: dict - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: max. time to wait for a task to complete :type timeout: int :return: @@ -106,19 +102,19 @@ def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIME 'asd_id': asd_id, 'safety': asd_safety } - task_guid = api.post( + task_guid = cls.api.post( api='/alba/nodes/{0}/reset_asd/'.format(alba_node_guid), data=data ) - result = api.wait_for_task(task_id=task_guid, timeout=timeout) + result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if result[0] is False: error_msg = "Removal of ASD '{0}; failed with {1}".format(asd_id, result[1]) BackendRemover.LOGGER.error(error_msg) raise RuntimeError(error_msg) return result[0] - @staticmethod - def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT): + @classmethod + def _remove_disk(cls, alba_node_guid, diskname, timeout=REMOVE_DISK_TIMEOUT): """ Removes a an initiliazed disk from the model @@ -126,8 +122,6 @@ def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT): :type alba_node_guid: str :param diskname: name of the disk :type diskname: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: max. 
time to wait for the task to complete :type timeout: int :return: @@ -135,27 +129,25 @@ def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT): data = { 'disk': diskname, } - task_guid = api.post( + task_guid = cls.api.post( api='/alba/nodes/{0}/remove_disk/'.format(alba_node_guid), data=data ) - result = api.wait_for_task(task_id=task_guid, timeout=timeout) + result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if result[0] is False: errormsg = "Removal of ASD '{0}' failed with '{1}'".format(diskname, result[1]) BackendRemover.LOGGER.error(errormsg) raise RuntimeError(errormsg) return result[0] - @staticmethod + @classmethod @required_backend - def remove_backend(albabackend_name, api, timeout=REMOVE_BACKEND_TIMEOUT): + def remove_backend(cls, albabackend_name, timeout=REMOVE_BACKEND_TIMEOUT): """ Removes a alba backend from the ovs cluster :param albabackend_name: the name of a existing alba backend :type albabackend_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: max. 
time to wait for a task to complete :type timeout: int :return: task was succesfull or not @@ -163,20 +155,19 @@ def remove_backend(albabackend_name, api, timeout=REMOVE_BACKEND_TIMEOUT): """ alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - task_guid = api.delete(api='/alba/backends/{0}'.format(alba_backend_guid)) - - result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_guid = cls.api.delete(api='/alba/backends/{0}'.format(alba_backend_guid)) + result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if result[0] is False: errormsg = "Removal of backend '{0}' failed with '{1}'".format(albabackend_name, result[1]) BackendRemover.LOGGER.error(errormsg) raise RuntimeError(errormsg) return result[0] - @staticmethod + @classmethod @required_preset @required_backend - def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIMEOUT): + def remove_preset(cls, preset_name, albabackend_name, timeout=REMOVE_PRESET_TIMEOUT): """ Removes a alba backend from the ovs cluster @@ -184,8 +175,6 @@ def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIME :type preset_name: str :param albabackend_name: name of the albabackend :type albabackend_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: max. 
time to wait for a task to complete :type timeout: int :return: task was succesfull or not @@ -194,12 +183,156 @@ def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIME alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name) data = {"name": preset_name} - task_guid = api.post(api='/alba/backends/{0}/delete_preset'.format(alba_backend_guid), data=data) + task_guid = cls.api.post(api='/alba/backends/{0}/delete_preset'.format(alba_backend_guid), data=data) - result = api.wait_for_task(task_id=task_guid, timeout=timeout) + result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if result[0] is False: errormsg = "Removal of preset '{0}' for backend '{1}' failed with '{2}'".format(preset_name, albabackend_name, result[1]) BackendRemover.LOGGER.error(errormsg) raise RuntimeError(errormsg) return result[0] + + + @classmethod + #@required_backend + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + """ + Link a LOCAL backend to a GLOBAL backend + + :param globalbackend_name: name of a GLOBAL alba backend + :type globalbackend_name: str + :param albabackend_name: name of a backend to unlink + :type albabackend_name: str + :param timeout: timeout counter in seconds + :type timeout: int + :return: + """ + data = { + "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) + } + + task_guid = cls.api.post( + api='/alba/backends/{0}/unlink_alba_backends' + .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), + data=data + ) + + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + if not task_result[0]: + error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( + albabackend_name, globalbackend_name, task_result[1]) + BackendRemover.LOGGER.error(error_msg) + raise RuntimeError(error_msg) + else: + BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend 
`{1}` should have succeeded" + .format(albabackend_name, globalbackend_name)) + return task_result[0] + + + @classmethod + #@required_backend + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + """ + Link a LOCAL backend to a GLOBAL backend + + :param globalbackend_name: name of a GLOBAL alba backend + :type globalbackend_name: str + :param albabackend_name: name of a backend to unlink + :type albabackend_name: str + :param timeout: timeout counter in seconds + :type timeout: int + :return: + """ + data = { + "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) + } + + task_guid = cls.api.post( + api='/alba/backends/{0}/unlink_alba_backends' + .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), + data=data + ) + + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + if not task_result[0]: + error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( + albabackend_name, globalbackend_name, task_result[1]) + BackendRemover.LOGGER.error(error_msg) + raise RuntimeError(error_msg) + else: + BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" + .format(albabackend_name, globalbackend_name)) + return task_result[0] + + + @classmethod + #@required_backend + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + """ + Link a LOCAL backend to a GLOBAL backend + + :param globalbackend_name: name of a GLOBAL alba backend + :type globalbackend_name: str + :param albabackend_name: name of a backend to unlink + :type albabackend_name: str + :param timeout: timeout counter in seconds + :type timeout: int + :return: + """ + data = { + "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) + } + + task_guid = cls.api.post( + api='/alba/backends/{0}/unlink_alba_backends' + 
.format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), + data=data + ) + + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + if not task_result[0]: + error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( + albabackend_name, globalbackend_name, task_result[1]) + BackendRemover.LOGGER.error(error_msg) + raise RuntimeError(error_msg) + else: + BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" + .format(albabackend_name, globalbackend_name)) + return task_result[0] + + + @classmethod + #@required_backend + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + """ + Link a LOCAL backend to a GLOBAL backend + + :param globalbackend_name: name of a GLOBAL alba backend + :type globalbackend_name: str + :param albabackend_name: name of a backend to unlink + :type albabackend_name: str + :param timeout: timeout counter in seconds + :type timeout: int + :return: + """ + data = { + "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) + } + + task_guid = cls.api.post( + api='/alba/backends/{0}/unlink_alba_backends' + .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), + data=data + ) + + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + if not task_result[0]: + error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( + albabackend_name, globalbackend_name, task_result[1]) + BackendRemover.LOGGER.error(error_msg) + raise RuntimeError(error_msg) + else: + BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" + .format(albabackend_name, globalbackend_name)) + return task_result[0] diff --git a/remove/roles.py b/remove/roles.py index e7f9c7e..93308b6 100644 --- a/remove/roles.py +++ b/remove/roles.py @@ -13,6 +13,8 @@ # # Open vStorage is 
distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. + +from ci.scenario_helpers.ci_constants import CIConstants from subprocess import check_output from ovs.extensions.generic.logger import Logger from ..helpers.fstab import FstabHelper @@ -20,7 +22,7 @@ from ..setup.roles import RoleSetup -class RoleRemover(object): +class RoleRemover(CIConstants): LOGGER = Logger("remove-ci_role_remover") CONFIGURE_DISK_TIMEOUT = 300 @@ -56,27 +58,26 @@ def _remove_filesystem(device, alias_part_label): RoleRemover.LOGGER.exception('Unable to remove filesystem of {0}'.format(alias_part_label)) raise RuntimeError('Could not remove filesystem of {0}'.format(alias_part_label)) - @staticmethod - def remove_role(ip, diskname, api): - allowed_roles = ['WRITE', 'READ', 'SCRUB', 'DB'] + @classmethod + def remove_role(cls, storagerouter_ip, diskname): + allowed_roles = ['WRITE', 'DTL', 'SCRUB', 'DB'] RoleRemover.LOGGER.info("Starting removal of disk roles.") - # Fetch information - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip) - disk = StoragerouterHelper.get_disk_by_ip(ip, diskname) + storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip=storagerouter_ip) + disk = StoragerouterHelper.get_disk_by_guid(guid=storagerouter.guid, diskname=diskname) # Check if there are any partitions on the disk, if so check if there is enough space if len(disk.partitions) > 0: for partition in disk.partitions: # Remove all partitions that have roles if set(partition.roles).issubset(allowed_roles) and len(partition.roles) > 0: RoleRemover.LOGGER.info("Removing {0} from partition {1} on disk {2}".format(partition.roles, partition.guid, diskname)) - RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid, + RoleSetup.configure_disk(storagerouter_guid=storagerouter.guid, disk_guid=disk.guid, offset=partition.offset, size=disk.size, roles=[], - api=api, partition_guid=partition.guid) + # Unmount partition 
RoleRemover.LOGGER.info("Umounting disk {2}".format(partition.roles, partition.guid, diskname)) RoleRemover._umount(partition.mountpoint) @@ -92,6 +93,8 @@ def remove_role(ip, diskname, api): RoleRemover.LOGGER.info("Removing partition {0} on disk {1} from model".format(partition.guid, diskname)) partition.delete() else: - RoleRemover.LOGGER.info("Found no roles on partition {1} on disk {2}".format(partition.roles, partition.guid, diskname)) + print 'Found no roles on partition' + RoleRemover.LOGGER.info("{1} on disk {2}".format(partition.roles, partition.guid, diskname)) else: + print 'found no partition' RoleRemover.LOGGER.info("Found no partition on the disk.") diff --git a/remove/vdisk.py b/remove/vdisk.py index fc7f962..de42e1d 100644 --- a/remove/vdisk.py +++ b/remove/vdisk.py @@ -14,12 +14,13 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. +from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.vdisk import VDiskHelper from ..validate.decorators import required_vtemplate -class VDiskRemover(object): +class VDiskRemover(CIConstants): LOGGER = Logger("remove-ci_vdisk_remover") REMOVE_SNAPSHOT_TIMEOUT = 60 @@ -29,13 +30,11 @@ class VDiskRemover(object): def __init__(self): pass - @staticmethod - def remove_vdisks_with_structure(vdisks, api, timeout=REMOVE_VDISK_TIMEOUT): + @classmethod + def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT): """ Remove many vdisks at once. 
Will keep the parent structure in mind :param vdisks: list of vdisks - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: seconds to elapse before raising a timeout error (for each volume) :return: """ @@ -50,8 +49,8 @@ def remove_vdisks_with_structure(vdisks, api, timeout=REMOVE_VDISK_TIMEOUT): VDiskRemover.remove_vdisk(vdisk.guid, api, timeout) removed_guids.append(vdisk.guid) - @staticmethod - def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_SNAPSHOT_TIMEOUT): + @classmethod + def remove_snapshot(cls, snapshot_guid, vdisk_name, vpool_name, timeout=REMOVE_SNAPSHOT_TIMEOUT): """ Remove a existing snapshot from a existing vdisk :param vdisk_name: location of a vdisk on a vpool @@ -59,8 +58,6 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S :type vdisk_name: str :param snapshot_guid: unique guid of a snapshot :type snapshot_guid: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param vpool_name: name of a existing vpool @@ -71,11 +68,11 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid data = {"snapshot_id": snapshot_guid} - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/remove_snapshot/'.format(vdisk_guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Deleting snapshot `{0}` for vdisk `{1}` has failed".format(snapshot_guid, vdisk_name) @@ -86,21 +83,19 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S .format(snapshot_guid, vdisk_name)) return True - @staticmethod - def remove_vdisk(vdisk_guid, api, 
timeout=REMOVE_VDISK_TIMEOUT): + @classmethod + def remove_vdisk(cls, vdisk_guid, timeout=REMOVE_VDISK_TIMEOUT): """ Remove a vdisk from a vPool :param vdisk_guid: guid of a existing vdisk :type vdisk_guid: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :return: if success :rtype: bool """ - task_guid = api.post(api='vdisks/{0}/delete'.format(vdisk_guid)) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_guid = cls.api.post(api='vdisks/{0}/delete'.format(vdisk_guid)) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Deleting vDisk `{0}` has failed".format(vdisk_guid) VDiskRemover.LOGGER.error(error_msg) @@ -109,43 +104,39 @@ def remove_vdisk(vdisk_guid, api, timeout=REMOVE_VDISK_TIMEOUT): VDiskRemover.LOGGER.info("Deleting vDisk `{0}` should have succeeded".format(vdisk_guid)) return True - @staticmethod - def remove_vdisk_by_name(vdisk_name, vpool_name, api, timeout=REMOVE_VDISK_TIMEOUT): + @classmethod + def remove_vdisk_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VDISK_TIMEOUT): """ Remove a vdisk from a vPool :param vdisk_name: name of a existing vdisk (e.g. 
test.raw) :type vdisk_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param vpool_name: name of a existing vpool :type vpool_name: str :return: if success :rtype: bool """ vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid - return VDiskRemover.remove_vdisk(vdisk_guid, api, timeout) + return VDiskRemover.remove_vdisk(vdisk_guid, timeout) - @staticmethod + @classmethod @required_vtemplate - def remove_vtemplate_by_name(vdisk_name, vpool_name, api, timeout=REMOVE_VTEMPLATE_TIMEOUT): + def remove_vtemplate_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VTEMPLATE_TIMEOUT): """ Remove a vTemplate from a cluster :param vdisk_name: name of a existing vdisk (e.g. test.raw) :type vdisk_name: str :param vpool_name: name of a existing vpool :type vpool_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :return: if success :rtype: bool """ vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/delete_vtemplate/'.format(vdisk_guid) ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Deleting vTemplate `{0}` has failed".format(vdisk_name) diff --git a/remove/vpool.py b/remove/vpool.py index 471d1b9..2261eac 100644 --- a/remove/vpool.py +++ b/remove/vpool.py @@ -14,26 +14,25 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.storagerouter import StoragerouterHelper from ..helpers.vpool import VPoolHelper -class VPoolRemover(object): +class VPoolRemover(CIConstants): LOGGER = Logger("remove-ci_vpool_remover") REMOVE_VPOOL_TIMEOUT = 500 - @staticmethod - def remove_vpool(vpool_name, storagerouter_ip, api, timeout=REMOVE_VPOOL_TIMEOUT): + @classmethod + def remove_vpool(cls, vpool_name, storagerouter_ip, timeout=REMOVE_VPOOL_TIMEOUT): """ Removes a existing vpool from a storagerouter :param vpool_name: the name of a existing vpool :type vpool_name: str :param storagerouter_ip: the ip address of a existing storagerouter :type storagerouter_ip: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: max. time to wait for a task to complete :type timeout: int :return: None @@ -42,9 +41,8 @@ def remove_vpool(vpool_name, storagerouter_ip, api, timeout=REMOVE_VPOOL_TIMEOUT vpool_guid = VPoolHelper.get_vpool_by_name(vpool_name).guid storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid data = {"storagerouter_guid": storagerouter_guid} - task_guid = api.post(api='/vpools/{0}/shrink_vpool/'.format(vpool_guid), data=data) - - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_guid = cls.api.post(api='/vpools/{0}/shrink_vpool/'.format(vpool_guid), data=data) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Deleting vPool `{0}` on storagerouter `{1}` has failed with error {2}".format(vpool_name, storagerouter_ip, task_result[1]) diff --git a/setup/backend.py b/setup/backend.py index 6c9f294..bd26070 100644 --- a/setup/backend.py +++ b/setup/backend.py @@ -14,6 +14,7 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
import time +from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.albanode import AlbaNodeHelper from ..helpers.backend import BackendHelper @@ -21,7 +22,7 @@ check_linked_backend, filter_osds -class BackendSetup(object): +class BackendSetup(CIConstants): LOGGER = Logger("setup-ci_backend_setup") LOCAL_STACK_SYNC = 30 @@ -32,16 +33,15 @@ class BackendSetup(object): CLAIM_ASD_TIMEOUT = 60 LINK_BACKEND_TIMEOUT = 60 MAX_BACKEND_TRIES = 20 - MAX_CLAIM_RETRIES = 5 def __init__(self): pass - @staticmethod + @classmethod @check_backend @required_roles(['DB']) - def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES): + def add_backend(cls, backend_name, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES): """ Add a new backend :param backend_name: Name of the Backend to add @@ -50,8 +50,6 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max :type scaling: str :return: backend_name :rtype: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: timeout between tries :type timeout: int :param max_tries: amount of max. 
tries to check if a backend has been successfully created @@ -60,7 +58,7 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max :rtype: bool """ # ADD_BACKEND - backend = api.post( + backend = cls.api.post( api='backends', data={ 'name': backend_name, @@ -70,7 +68,7 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max ) # ADD_ALBABACKEND - api.post(api='alba/backends', data={'backend_guid': backend['guid'], 'scaling': scaling}) + cls.api.post(api='alba/backends', data={'backend_guid': backend['guid'], 'scaling': scaling}) # CHECK_STATUS until done backend_running_status = "RUNNING" @@ -96,10 +94,10 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max .format(backend_name, scaling, BackendHelper.get_backend_status_by_name(backend_name))) return False - @staticmethod + @classmethod @check_preset @required_backend - def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT): + def add_preset(cls, albabackend_name, preset_details, timeout=ADD_PRESET_TIMEOUT): """ Add a new preset :param albabackend_name: albabackend name (e.g. 
'mybackend') @@ -117,8 +115,6 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT "fragment_size": 2097152 } :type preset_details: dict - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: amount of max time that preset may take to be added :type timeout: int :return: success or not @@ -132,12 +128,12 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT 'fragment_size': preset_details['fragment_size']} # ADD_PRESET - task_guid = api.post( + task_guid = cls.api.post( api='/alba/backends/{0}/add_preset'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), data=preset ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Preset `{0}` has failed to create on backend `{1}`".format(preset_details['name'], albabackend_name) @@ -147,10 +143,10 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT BackendSetup.LOGGER.info("Creation of preset `{0}` should have succeeded on backend `{1}`".format(preset_details['name'], albabackend_name)) return True - @staticmethod + @classmethod @required_preset @required_backend - def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_PRESET_TIMEOUT): + def update_preset(cls, albabackend_name, preset_name, policies, timeout=UPDATE_PRESET_TIMEOUT): """ Update a existing preset :param albabackend_name: albabackend name @@ -159,20 +155,18 @@ def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_P :type preset_name: str :param policies: policies to be updated (e.g. 
[[1,1,2,2], [1,1,1,2]]) :type policies: list > list - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: amount of max time that preset may take to be added :type timeout: int :return: success or not :rtype: bool """ - task_guid = api.post( + task_guid = cls.api.post( api='/alba/backends/{0}/update_preset' .format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), data={"name": preset_name, "policies": policies} ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Preset `{0}` has failed to update with policies `{1}` on backend `{2}`"\ @@ -184,30 +178,28 @@ def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_P .format(preset_name, albabackend_name)) return True - @staticmethod + @classmethod @required_backend @filter_osds - def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRIES): + def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRIES): """ Initialize and claim a new asds on given disks :param target: target to add asds too :type target: str :param disks: dict with diskname as key and amount of osds as value :type disks: dict - :param claim_retries: Maximum amount of claim retries - :type claim_retries: int - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param albabackend_name: Name of the AlbaBackend to configure :type albabackend_name: str + :param claim_retries: Maximum amount of claim retries + :type claim_retries: int :return: preset_name :rtype: str """ - BackendSetup._discover_and_register_nodes(api) # Make sure all backends are registered - node_mapping = AlbaNodeHelper._map_alba_nodes(api) # target is a node + BackendSetup._discover_and_register_nodes() # Make sure all backends are registered + node_mapping = 
AlbaNodeHelper._map_alba_nodes() # target is a node alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - backend_info = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name, api=api) + backend_info = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name) local_stack = backend_info['local_stack'] node_slot_information = {} for disk, amount_of_osds in disks.iteritems(): @@ -227,7 +219,7 @@ def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRI node_slot_information[alba_node_guid] = slot_information for alba_node_guid, slot_information in node_slot_information.iteritems(): BackendSetup.LOGGER.info('Posting {0} for alba_node_guid {1}'.format(slot_information, alba_node_guid)) - BackendSetup._fill_slots(alba_node_guid=alba_node_guid, slot_information=slot_information, api=api) + BackendSetup._fill_slots(alba_node_guid=alba_node_guid, slot_information=slot_information) # Local stack should sync with the new disks BackendSetup.LOGGER.info('Sleeping for {0} seconds to let local stack sync.'.format(BackendSetup.LOCAL_STACK_SYNC)) @@ -266,14 +258,12 @@ def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRI node_osds_to_claim[alba_node_guid] = osds_to_claim for alba_node_guid, osds_to_claim in node_osds_to_claim.iteritems(): BackendSetup.LOGGER.info('Posting {0} for alba_node_guid {1}'.format(osds_to_claim, alba_node_guid)) - BackendSetup._claim_osds(alba_backend_name=albabackend_name, alba_node_guid=alba_node_guid, osds=osds_to_claim, api=api) + BackendSetup._claim_osds(alba_backend_name=albabackend_name, alba_node_guid=alba_node_guid, osds=osds_to_claim) - @staticmethod - def _discover_and_register_nodes(api): + @classmethod + def _discover_and_register_nodes(cls): """ Will discover and register potential nodes to the DAL/Alba - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient """ options = { @@ -281,61 +271,54 @@ 
def _discover_and_register_nodes(api): 'contents': 'node_id,_relations', 'discover': True } - response = api.get( + response = cls.api.get( api='alba/nodes', params=options ) for node in response['data']: - api.post( + cls.api.post( api='alba/nodes', data={'node_id': {'node_id': node['node_id']}} ) - @staticmethod - def _map_alba_nodes(api): + @classmethod + def _map_alba_nodes(cls): """ Will map the alba_node_id with its guid counterpart and return the map dict - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient """ mapping = {} options = { 'contents': 'node_id,_relations', } - response = api.get( + response = cls.api.get( api='alba/nodes', params=options ) for node in response['data']: - print node mapping[node['node_id']] = node['guid'] return mapping - @staticmethod - def get_backend_local_stack(alba_backend_name, api): + @classmethod + def get_backend_local_stack(cls, alba_backend_name): """ Fetches the local stack property of a backend :param alba_backend_name: backend name :type alba_backend_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient """ options = { 'contents': 'local_stack', } - return api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)), + return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)), params={'queryparams': options} ) - @staticmethod - def _fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_TIMEOUT): + @classmethod + def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_TIMEOUT): """ Initializes a disk to create osds :param alba_node_guid: - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: timeout counter in seconds :param slot_information: list of slots to fill :type slot_information: list @@ -344,11 +327,11 @@ def 
_fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_T """ data = {'slot_information': slot_information} - task_guid = api.post( + task_guid = cls.api.post( api='/alba/nodes/{0}/fill_slots/'.format(alba_node_guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Initialize disk `{0}` for alba node `{1}` has failed".format(data, alba_node_guid) BackendSetup.LOGGER.error(error_msg) @@ -357,8 +340,8 @@ def _fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_T BackendSetup.LOGGER.info("Successfully initialized '{0}'".format(data)) return task_result[0] - @staticmethod - def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_TIMEOUT): + @classmethod + def _claim_osds(cls, alba_backend_name, alba_node_guid, osds, timeout=CLAIM_ASD_TIMEOUT): """ Claims a asd :param alba_backend_name: backend name @@ -367,19 +350,17 @@ def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_ :type alba_node_guid: str :param osds: list of osds to claim :type osds: list - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: timeout counter in seconds :type timeout: int :return: """ data = {'alba_node_guid': alba_node_guid, 'osds': osds} - task_guid = api.post( + task_guid = cls.api.post( api='/alba/backends/{0}/add_osds/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Claim ASD `{0}` for alba backend `{1}` has failed with error '{2}'".format(osds, alba_backend_name, task_result[1]) @@ -389,11 +370,11 @@ def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_ 
BackendSetup.LOGGER.info("Succesfully claimed '{0}'".format(osds)) return task_result[0] - @staticmethod + @classmethod @required_preset @required_backend @check_linked_backend - def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout=LINK_BACKEND_TIMEOUT): + def link_backend(cls, albabackend_name, globalbackend_name, preset_name, timeout=LINK_BACKEND_TIMEOUT): """ Link a LOCAL backend to a GLOBAL backend @@ -403,8 +384,6 @@ def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout :type globalbackend_name: str :param preset_name: name of the preset available in the LOCAL alba backend :type preset_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: timeout counter in seconds :type timeout: int :return: @@ -427,14 +406,13 @@ def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout } } } - task_guid = api.post( + task_guid = cls.api.post( api='/alba/backends/{0}/link_alba_backends' .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) - + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Linking backend `{0}` to global backend `{1}` has failed with error '{2}'".format( albabackend_name, globalbackend_name, task_result[1]) diff --git a/setup/domain.py b/setup/domain.py index d4e2f7d..68159b7 100644 --- a/setup/domain.py +++ b/setup/domain.py @@ -14,6 +14,7 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.backend import BackendHelper from ..helpers.domain import DomainHelper @@ -21,29 +22,27 @@ from ..validate.decorators import required_backend -class DomainSetup(object): +class DomainSetup(CIConstants): LOGGER = Logger("setup-ci_domain_setup") def __init__(self): pass - @staticmethod - def add_domain(domain_name, api): + @classmethod + def add_domain(cls, domain_name): """ Add a new (recovery) domain to the cluster :param domain_name: name of a new domain :type domain_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :return: """ # check if domain already exists if not DomainHelper.get_domain_by_name(domain_name): data = {"name": domain_name} - api.post( + cls.api.post( api='/domains/', data=data ) @@ -58,22 +57,18 @@ def add_domain(domain_name, api): else: return - @staticmethod - def link_domains_to_storagerouter(domain_details, storagerouter_ip, api): + @classmethod + def link_domains_to_storagerouter(cls, domain_details, storagerouter_ip): """ Link a existing domain(s) and/or recovery (domains) to a storagerouter - :param domain_details: domain details of a storagerouter example: {"domain_guids":["Gravelines"],"recovery_domain_guids":["Roubaix", "Strasbourg"]} :type domain_details: dict :param storagerouter_ip: ip address of a storage router :type storagerouter_ip: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :return: """ - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip) domain_guids = [] recovery_domain_guids = [] # translate domain names to domain guids @@ -86,25 +81,27 @@ def link_domains_to_storagerouter(domain_details, storagerouter_ip, api): data = {"domain_guids": domain_guids, "recovery_domain_guids": recovery_domain_guids} - api.post( + + storagerouter_guid = 
StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid + cls.api.post( api='/storagerouters/{0}/set_domains/'.format(storagerouter_guid), data=data ) - storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip) + storagerouter = StoragerouterHelper.get_storagerouter_by_guid(storagerouter_guid=storagerouter_guid) if len(set(domain_guids) - set(storagerouter.regular_domains)) != 0 or \ len(set(recovery_domain_guids) - set(storagerouter.recovery_domains)) != 0: - error_msg = "Failed to link (recovery) domain(s) to storagerouter `{0}`".format(storagerouter_ip) + error_msg = "Failed to link (recovery) domain(s) to storagerouter `{0}`".format(storagerouter_guid) DomainSetup.LOGGER.error(error_msg) raise RuntimeError(error_msg) else: DomainSetup.LOGGER.info("Successfully linked domain (recovery) domain(s) to storagerouter `{0}`" - .format(storagerouter_ip)) + .format(storagerouter_guid)) return - @staticmethod + @classmethod @required_backend - def link_domains_to_backend(domain_details, albabackend_name, api): + def link_domains_to_backend(cls, domain_details, albabackend_name): """ Link a existing domain(s) and/or recovery (domains) to a storagerouter @@ -113,9 +110,6 @@ def link_domains_to_backend(domain_details, albabackend_name, api): :type domain_details: dict :param albabackend_name: name of a existing alba backend :type albabackend_name: str - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient - :return: """ albabackend_guid = BackendHelper.get_backend_guid_by_name(albabackend_name) @@ -126,7 +120,7 @@ def link_domains_to_backend(domain_details, albabackend_name, api): domain_guids.append(DomainHelper.get_domainguid_by_name(domain_name)) data = {"domain_guids": domain_guids} - api.post( + cls.api.post( api='/backends/{0}/set_domains/'.format(albabackend_guid), data=data ) diff --git a/setup/proxy.py b/setup/proxy.py index 1da982c..db40fb2 100644 --- a/setup/proxy.py +++ b/setup/proxy.py @@ -1,4 
+1,5 @@ # Copyright (C) 2016 iNuron NV +# Copyright (C) 2016 iNuron NV # # This file is part of Open vStorage Open Source Edition (OSE), # as available from diff --git a/setup/roles.py b/setup/roles.py index e56b8ea..f781638 100644 --- a/setup/roles.py +++ b/setup/roles.py @@ -14,12 +14,13 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. +from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.storagerouter import StoragerouterHelper from ..validate.decorators import check_role_on_disk -class RoleSetup(object): +class RoleSetup(CIConstants): LOGGER = Logger("setup-ci_role_setup") CONFIGURE_DISK_TIMEOUT = 300 @@ -29,28 +30,27 @@ class RoleSetup(object): def __init__(self): pass - @staticmethod + @classmethod @check_role_on_disk - def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION_SIZE): + def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION_SIZE): + """ Partition and adds roles to a disk - :param storagerouter_ip: ip address of a existing storagerouter + :param storagerouter_ip: guid of an existing storagerouter :type storagerouter_ip: str :param diskname: shortname of a disk (e.g. 
sdb) :type diskname: str :param roles: list of roles you want to add to the disk :type roles: list - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param min_size: minimum total_partition_size that is required to allocate the disk role :type min_size: int :return: """ # Fetch information - storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip) - disk = StoragerouterHelper.get_disk_by_ip(storagerouter_ip, diskname) + storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid + disk = StoragerouterHelper.get_disk_by_guid(storagerouter_guid, diskname) # Check if there are any partitions on the disk, if so check if there is enough space unused_partitions = [] if len(disk.partitions) > 0: @@ -60,7 +60,7 @@ def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION # Check if the partition is in use - could possibly write role on unused partition if partition.mountpoint is None: # Means no output -> partition not mounted - # @Todo support partitions that are not sequentional + # @Todo support partitions that are not sequential unused_partitions.append(partition) # Elect biggest unused partition as potential candidate @@ -72,20 +72,20 @@ def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION if ((disk.size-total_partition_size)/1024**3) > min_size: # disk is still large enough, let the partitioning begin and apply some roles! 
RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid, disk_guid=disk.guid, offset=total_partition_size + 1, - size=(disk.size-total_partition_size)-1, roles=roles, api=api) + size=(disk.size-total_partition_size)-1, roles=roles) elif biggest_unused_partition is not None and (biggest_unused_partition.size/1024**3) > min_size: RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid, disk_guid=disk.guid, offset=biggest_unused_partition.offset, - size=biggest_unused_partition.size, roles=roles, api=api, partition_guid=biggest_unused_partition.guid) + size=biggest_unused_partition.size, roles=roles, partition_guid=biggest_unused_partition.guid) else: # disk is too small raise RuntimeError("Disk `{0}` on node `{1}` is too small for role(s) `{2}`, min. total_partition_size is `{3}`" - .format(diskname, storagerouter_ip, roles, min_size)) + .format(diskname, storagerouter_guid, roles, min_size)) else: # there are no partitions on the disk, go nuke it! - RoleSetup.configure_disk(storagerouter_guid, disk.guid, 0, disk.size, roles, api) + RoleSetup.configure_disk(storagerouter_guid, disk.guid, 0, disk.size, roles) - @staticmethod - def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, partition_guid=None, + @classmethod + def configure_disk(cls, storagerouter_guid, disk_guid, offset, size, roles, partition_guid=None, timeout=CONFIGURE_DISK_TIMEOUT): """ Partition a disk and add roles to it @@ -100,8 +100,6 @@ def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, part :type size: int :param roles: roles to add to a partition (e.g. 
['DB', 'WRITE']) :type roles: list - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param partition_guid: guid of the partition @@ -116,12 +114,11 @@ def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, part 'roles': roles, 'partition_guid': partition_guid } - task_guid = api.post( + task_guid = cls.api.post( api='/storagerouters/{0}/configure_disk/'.format(storagerouter_guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) - + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Adjusting disk `{0}` has failed on storagerouter `{1}` with error '{2}'" \ .format(disk_guid, storagerouter_guid, task_result[1]) diff --git a/setup/vdisk.py b/setup/vdisk.py index 95bd28d..cf4d9d8 100644 --- a/setup/vdisk.py +++ b/setup/vdisk.py @@ -14,6 +14,7 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.storagerouter import StoragerouterHelper from ..helpers.vdisk import VDiskHelper @@ -21,7 +22,7 @@ from ..validate.decorators import required_vdisk, required_snapshot, required_vtemplate -class VDiskSetup(object): +class VDiskSetup(CIConstants): LOGGER = Logger("setup-ci_vdisk_setup") CREATE_SNAPSHOT_TIMEOUT = 60 @@ -34,8 +35,8 @@ class VDiskSetup(object): def __init__(self): pass - @staticmethod - def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True, sticky=True, + @classmethod + def create_snapshot(cls, snapshot_name, vdisk_name, vpool_name, consistent=True, sticky=True, timeout=CREATE_SNAPSHOT_TIMEOUT): """ Create a new snapshot for a vdisk @@ -49,8 +50,6 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True, :type consistent: bool :param sticky: let this snapshot stick forever? :type sticky: bool - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param vpool_name: name of a existing vpool @@ -66,11 +65,11 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True, 'sticky': sticky } - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/create_snapshot/'.format(vdisk_guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Creating snapshot `{0}` for vdisk `{1}` on vPool `{2}` has failed"\ @@ -82,8 +81,8 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True, .format(snapshot_name, vdisk_name, vpool_name)) return task_result[1] - @staticmethod - def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CREATE_VDISK_TIMEOUT): + @classmethod + def 
create_vdisk(cls, vdisk_name, vpool_name, size, storagerouter_ip, timeout=CREATE_VDISK_TIMEOUT): """ Create a new vDisk on a certain vPool/storagerouter :param vdisk_name: location of a vdisk on a vpool (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw ) @@ -94,8 +93,6 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR :type size: int :param storagerouter_ip: ip address of a existing storagerouter :type storagerouter_ip: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param vpool_name: name of a existing vpool @@ -117,11 +114,11 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR "vpool_guid": vpool_guid, "storagerouter_guid": storagerouter_guid} - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/', data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Creating vdisk `{0}` on vPool `{1}` on storagerouter `{2}` has failed with error {3}"\ @@ -133,24 +130,23 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR .format(vdisk_name, vpool_name, storagerouter_ip)) return task_result[1] - @staticmethod + @classmethod @required_vdisk - def move_vdisk(vdisk_guid, target_storagerouter_guid, api, timeout=60): + def move_vdisk(cls, vdisk_guid, target_storagerouter_guid, timeout=60): """ Moves a vdisk :param vdisk_guid: guid of the vdisk :param target_storagerouter_guid: guid of the storuagerouter to move to - :param api: instance of ovs client :param timeout: timeout in seconds :return: """ data = {"target_storagerouter_guid": target_storagerouter_guid} - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/move/'.format(vdisk_guid), data=data ) - task_result = 
api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Moving vdisk {0} to {1} has failed with {2}.".format( @@ -162,10 +158,10 @@ def move_vdisk(vdisk_guid, target_storagerouter_guid, api, timeout=60): "Vdisk {0} should have been moved to {1}.".format(vdisk_guid, target_storagerouter_guid)) return task_result[1] - @staticmethod + @classmethod @required_vdisk @required_snapshot - def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, snapshot_id=None, + def create_clone(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, snapshot_id=None, timeout=CREATE_CLONE_TIMEOUT): """ Create a new vDisk on a certain vPool/storagerouter @@ -179,8 +175,6 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, :type storagerouter_ip: str :param snapshot_id: GUID of a existing snapshot (DEFAULT=None -> will create new snapshot) :type snapshot_id: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :param vpool_name: name of a existing vpool @@ -210,11 +204,11 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, "storagerouter_guid": storagerouter_guid, "snapshot_id": snapshot_id} - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/clone'.format(vdisk.guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Creating clone `{0}` with snapshot_id `{4}` on vPool `{1}` on storagerouter `{2}` " \ @@ -228,9 +222,9 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, snapshot_id)) return task_result[1] - @staticmethod + @classmethod @required_vdisk - def 
set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): + def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): """ Create a new vDisk on a certain vPool/storagerouter Set a existing vDisk as vTemplate @@ -240,17 +234,15 @@ def set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMP :type vdisk_name: str :param vpool_name: name of a existing vpool :type vpool_name: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete """ # fetch the requirements vdisk = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name) - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/set_as_template'.format(vdisk.guid) ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Creating vTemplate `{0}` has failed with error {1}".format(vdisk_name, task_result[1]) @@ -260,9 +252,9 @@ def set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMP VDiskSetup.LOGGER.info("Creating vTemplate `{0}` should have succeeded".format(vdisk_name)) return task_result[1] - @staticmethod + @classmethod @required_vtemplate - def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, + def create_from_template(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): """ Create a new vDisk on a certain vPool/storagerouter @@ -275,8 +267,6 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i :type new_vdisk_name: str :param storagerouter_ip: ip address of a existing storagerouter where the clone will be deployed :type storagerouter_ip: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait 
for the task to complete :return: dict with info about the new vdisk {'vdisk_guid': new_vdisk.guid, 'name': new_vdisk.name, 'backingdevice': devicename} :rtype: dict @@ -294,11 +284,11 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i data = {"name": official_new_vdisk_name, "storagerouter_guid": storagerouter_guid} - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/create_from_template'.format(vdisk.guid), data=data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Creating vTemplate `{0}` has failed with error {1}".format(vdisk_name, task_result[1]) @@ -308,9 +298,9 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i VDiskSetup.LOGGER.info("Creating vTemplate `{0}` should have succeeded".format(vdisk_name)) return task_result[1] - @staticmethod + @classmethod @required_vdisk - def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLBACK_VDISK_TIMEOUT): + def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, timeout=ROLLBACK_VDISK_TIMEOUT): """ Rollback a vdisk to a certain snapshot @@ -321,8 +311,6 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB :type vpool_name: str :param snapshot_id: guid of a snapshot for the chosen vdisk :type snapshot_id: str - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete """ @@ -331,11 +319,11 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB snapshot = VDiskHelper.get_snapshot_by_guid(snapshot_guid=snapshot_id, vdisk_name=vdisk_name, vpool_name=vpool_name) - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/rollback'.format(vdisk_guid), data={"timestamp": snapshot['timestamp']} ) - task_result = 
api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Rollback vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1]) @@ -345,9 +333,9 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB VDiskSetup.LOGGER.info("Rollback vDisk `{0}` should have succeeded".format(vdisk_name)) return task_result[1] - @staticmethod + @classmethod @required_vdisk - def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VDISK_TIMEOUT): + def set_config_params(cls, vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT): """ Rollback a vdisk to a certain snapshot @@ -366,8 +354,6 @@ def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VD ] } :type config: dict - :param api: specify a valid api connection to the setup - :type api: ci.helpers.api.OVSClient :param timeout: time to wait for the task to complete :type timeout: int :rtype: dict @@ -377,11 +363,11 @@ def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VD # fetch the requirements vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name=vdisk_name, vpool_name=vpool_name).guid - task_guid = api.post( + task_guid = cls.api.post( api='/vdisks/{0}/set_config_params'.format(vdisk_guid), data={"new_config_params": config} ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = "Setting config vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1]) diff --git a/setup/vpool.py b/setup/vpool.py index eb2839c..f338bd2 100644 --- a/setup/vpool.py +++ b/setup/vpool.py @@ -14,6 +14,7 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
+from ci.scenario_helpers.ci_constants import CIConstants from ovs.lib.generic import GenericController from ovs.extensions.generic.logger import Logger from ..helpers.backend import BackendHelper @@ -21,7 +22,7 @@ from ..validate.decorators import required_roles, check_vpool -class VPoolSetup(object): +class VPoolSetup(CIConstants): LOGGER = Logger('setup-ci_vpool_setup') ADD_VPOOL_TIMEOUT = 500 @@ -30,10 +31,10 @@ class VPoolSetup(object): def __init__(self): pass - @staticmethod + @classmethod @check_vpool @required_roles(REQUIRED_VPOOL_ROLES, 'LOCAL') - def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2, timeout=ADD_VPOOL_TIMEOUT, *args, **kwargs): + def add_vpool(cls, vpool_name, vpool_details, storagerouter_ip, proxy_amount=2, timeout=ADD_VPOOL_TIMEOUT, *args, **kwargs): """ Adds a VPool to a storagerouter @@ -43,8 +44,6 @@ def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2, :type vpool_details: dict :param timeout: specify a timeout :type timeout: int - :param api: specify a valid api connection to the setup - :type api: helpers.api.OVSClient :param storagerouter_ip: ip of the storagerouter to add the vpool too :type storagerouter_ip: str :param proxy_amount: amount of proxies for this vpool @@ -100,13 +99,13 @@ def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2, error_msg = 'Wrong `block_cache->location` in vPool configuration, it should be `disk` or `backend`' VPoolSetup.LOGGER.error(error_msg) raise RuntimeError(error_msg) - - task_guid = api.post( + + task_guid = cls.api.post( api='/storagerouters/{0}/add_vpool/'.format( - StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)), + StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid), data=api_data ) - task_result = api.wait_for_task(task_id=task_guid, timeout=timeout) + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) if not task_result[0]: error_msg = 'vPool {0} has 
failed to create on storagerouter {1} because: {2}'.format(vpool_name, storagerouter_ip, task_result[1]) VPoolSetup.LOGGER.error(error_msg) diff --git a/validate/roles.py b/validate/roles.py index f37f59e..6f30ee9 100644 --- a/validate/roles.py +++ b/validate/roles.py @@ -15,6 +15,8 @@ # but WITHOUT ANY WARRANTY of any kind. from ovs.extensions.generic.logger import Logger from ..helpers.disk import DiskHelper +from ..helpers.storagerouter import StoragerouterHelper + class RoleValidation(object): @@ -31,7 +33,7 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"): :param roles: the required roles :type roles: list - :param storagerouter_ip: ip address of a storagerouter + :param storagerouter_ip: guid of a storagerouter :type storagerouter_ip: str :param location: * GLOBAL: checks the whole cluster if certain roles are available @@ -39,11 +41,12 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"): :type location: str :return: None """ - # fetch availabe roles if location == "LOCAL": # LOCAL - available_roles = DiskHelper.get_roles_from_disks(storagerouter_ip=storagerouter_ip) + storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid + + available_roles = DiskHelper.get_roles_from_disks(storagerouter_guid=storagerouter_guid) else: # GLOBAL available_roles = DiskHelper.get_roles_from_disks() @@ -62,7 +65,7 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"): # append storagerouter_ip if searching on a LOCAL node if location == "LOCAL": - error_msg += " on storagerouter {0}".format(storagerouter_ip) + error_msg += " on storagerouter {0}".format(storagerouter_guid) RoleValidation.LOGGER.error(error_msg) raise RuntimeError(error_msg) @@ -82,4 +85,5 @@ def check_role_on_disk(roles, storagerouter_ip, disk_name): :return: if available on disk :rtype: bool """ - return len(set(roles).difference(set(DiskHelper.get_roles_from_disk(storagerouter_ip, disk_name)))) == 0 + 
storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid + return len(set(roles).difference(set(DiskHelper.get_roles_from_disk(storagerouter_guid, disk_name)))) == 0 From 341e4c2e73c1bd44702599b8b84f824dd411f1a3 Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 11:22:45 +0100 Subject: [PATCH 04/28] Create vms with cloud init. Fixed the mac default --- helpers/hypervisor/apis/kvm/option_mapping.py | 2 +- helpers/hypervisor/apis/kvm/sdk.py | 134 ++++++++++++++++++ helpers/hypervisor/hypervisors/kvm.py | 25 ++++ helpers/hypervisor/hypervisors/vmware.py | 25 ++++ 4 files changed, 185 insertions(+), 1 deletion(-) diff --git a/helpers/hypervisor/apis/kvm/option_mapping.py b/helpers/hypervisor/apis/kvm/option_mapping.py index 7f0eaad..5a4784e 100644 --- a/helpers/hypervisor/apis/kvm/option_mapping.py +++ b/helpers/hypervisor/apis/kvm/option_mapping.py @@ -104,7 +104,7 @@ class SdkOptionMapping(object): "mac": { "option": "mac", "values": None, - "default": "random", + "default": "RANDOM", "type": str }, } diff --git a/helpers/hypervisor/apis/kvm/sdk.py b/helpers/hypervisor/apis/kvm/sdk.py index 753754f..10fd4bc 100644 --- a/helpers/hypervisor/apis/kvm/sdk.py +++ b/helpers/hypervisor/apis/kvm/sdk.py @@ -674,6 +674,140 @@ def create_vm(self, name, vcpus, ram, disks, cdrom_iso=None, os_type=None, os_va print ' '.join(command+options) raise RuntimeError(msg) + def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): + + template_directory = '/var/lib/libvirt/images' + vmdk_file = "{0}/{1}.vmdk".format(template_directory, cloud_init_name) + qcow_file = "{0}/{1}.qcow2".format(template_directory, cloud_init_name) + # Check if cloud_init already exists if not download vmdk + if not self.ssh_client.file_exists(vmdk_file): + self.ssh_client.run(["wget", "-O", vmdk_file, cloud_init_url]) + + if not 
self.ssh_client.file_exists(qcow_file): + self.ssh_client.run(["qemu-img", "convert", "-O", "qcow2", vmdk_file, qcow_file]) + + vm_directory = "{0}/{1}".format(template_directory, name) + user_data = "{0}/user-data".format(vm_directory) + meta_data = "{0}/meta-data".format(vm_directory) + ci_iso = "{0}/{1}.iso".format(vm_directory, name) + boot_disk = "{0}/{1}.qcow2".format(vm_directory, name) + boot_disk_size = "30G" + + meta_data_lines = [ + 'instance-id: {0}'.format(uuid.uuid1()), + 'local-hostname: {0}'.format(name), + 'network-interfaces: |', + ' auto ens3', + ' iface ens3 inet static', + ' address {0}'.format(ip), + ' netmask {0}'.format(netmask), + ' gateway {0}'.format(gateway), + 'manage_resolve_conf: True', + 'resolv_conf:', + ' nameservers:[{0}]'.format(nameserver), + '' + ] + + user_data_lines = [ + '#cloud-config', + 'hostname: {0}'.format(name), + 'manage_etc_hosts: True', + 'disable_root: False', + 'password: {0}'.format(root_password), + 'ssh_pwauth: True', + 'chpasswd:', + ' list: |', + ' root:{0}'.format(root_password), + ' ubuntu:{0}'.format(root_password), + ' expire: False', + 'runcmd:', + ' - [sed, -ie, "s/PermitRootLogin prohibit-password/PermitRootLogin yes/", /etc/ssh/sshd_config]', + ' - [sed, -ie, "s/PasswordAuthentication no/PasswordAuthentication yes/", /etc/ssh/sshd_config]', + ' - [service, ssh, restart]', + '' + ] + + # Check if vm already exists with this name + vm = None + + try: + vm = self._conn.lookupByName(name) + except libvirt.libvirtError: + pass + + if vm and force: + self.delete_vm(vm, True) + elif vm and not force: + raise Exception('VM {0} is still defined on this hypervisor. 
Use the force=True option to delete.'.format(name)) + + if self.ssh_client.dir_exists(vm_directory): + exists, used_disk, vm_name = self._check_disks_in_use([ci_iso, boot_disk]) + if exists: + raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, vm_name)) + + self.ssh_client.dir_delete(vm_directory) + + self.ssh_client.dir_create(vm_directory) + # Copy template image + self.ssh_client.run(["cp", qcow_file, boot_disk]) + + # Resize image + self.ssh_client.run(["qemu-img", "resize", boot_disk, boot_disk_size]) + + # Create metadata and user data file + self.ssh_client.file_write(meta_data, '\n'.join(meta_data_lines)) + + self.ssh_client.file_write(user_data, '\n'.join(user_data_lines)) + + # Generate iso for cloud-init + self.ssh_client.run(["genisoimage", "-output", ci_iso, "-volid", "cidata", "-joliet", "-r", user_data, meta_data]) + + # Create extra disks + extra_disks = [] + if amount_disks > 0 and size > 0: + if not self.ssh_client.dir_exists(mountpoint): + raise Exception("Directory {0} doesn't exists.".format(mountpoint)) + + for i in xrange(1, amount_disks+1): + disk_path = "%s/%s_%02d.qcow2" % (mountpoint, name, i,) + exists, used_disk, vm_name = self._check_disks_in_use(disk_path) + disk_exists_filesystem = self.ssh_client.file_exists(disk_path) + if disk_exists_filesystem and exists: + raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, vm_name)) + elif disk_exists_filesystem: + self.ssh_client.file_delete(disk_path) + + self.ssh_client.run(['qemu-img', 'create', '-f', 'qcow2', disk_path, size]) + extra_disks.append(disk_path) + + all_disks = [{'mountpoint': boot_disk, "format": "qcow2", "bus": "virtio"}] + + for extra_disk in extra_disks: + all_disks.append({'mountpoint': extra_disk, "format": "qcow2", "bus": "virtio"}) + + self.create_vm(name=name, vcpus=vcpus, ram=ram, disks=all_disks, cdrom_iso=ci_iso, + networks=[{"bridge": bridge, "model": "virtio"}], start=True) + + def _check_disks_in_use(self, disk_paths): + """ + 
check if disks are in used + :param disks: list of disk paths + :type disks: list + :return: bool + """ + for dom in self.get_vms(): + dom_info = ElementTree.fromstring(dom.XMLDesc(0)) + disks = dom_info.findall('.//disk') + for disk in disks: + if disk.find('source') is None: + continue + used_disk = disk.find('source').get('file') + if used_disk in disk_paths: + return True, used_disk, dom_info.find('name').text + raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, dom_info.find('name').text)) + return False, '', '' + @staticmethod def _update_xml_for_ovs(xml, edge_configuration): """ diff --git a/helpers/hypervisor/hypervisors/kvm.py b/helpers/hypervisor/hypervisors/kvm.py index 899fb84..3067bf1 100644 --- a/helpers/hypervisor/hypervisors/kvm.py +++ b/helpers/hypervisor/hypervisors/kvm.py @@ -46,6 +46,31 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T _ = ip, wait # For compatibility purposes only return self.sdk.create_vm_from_template(name, source_vm, disks, mountpoint) + def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): + """ + create vm from cloud init + :param name: name of the vm + :param vcpus: amount of vCPUs + :param ram: amount of ram + :param bridge: network bridge + :param ip: ip address + :param netmask: netmask + :param gateway: gateway + :param nameserver: nameserver + :param amount_disks: amount of extra disks + :param size: the size of the extra disks + :param mountpoint: mountpoint where the extra disks will be stored + :param cloud_init_url: cloud init url + :param cloud_init_name: cloud init name + :param root_password: root password of the vm + :param force: force delete existing vms with this name + :return: + """ + return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, + amount_disks, size, mountpoint, 
cloud_init_url, cloud_init_name, + root_password, force) + def delete_vm(self, vmid, storagedriver_mountpoint=None, storagedriver_storage_ip=None, devicename=None, disks_info=None, wait=True): """ Deletes a given VM and its disks diff --git a/helpers/hypervisor/hypervisors/vmware.py b/helpers/hypervisor/hypervisors/vmware.py index 55b39da..db8df01 100644 --- a/helpers/hypervisor/hypervisors/vmware.py +++ b/helpers/hypervisor/hypervisors/vmware.py @@ -51,6 +51,31 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T return task_info.info.result.value return None + def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): + """ + create vm from cloud init + :param name: name of the vm + :param vcpus: amount of vCPUs + :param ram: amount of ram + :param bridge: network bridge + :param ip: ip address + :param netmask: netmask + :param gateway: gateway + :param nameserver: nameserver + :param amount_disks: amount of extra disks + :param size: the size of the extra disks + :param mountpoint: mountpoint where the extra disks will be stored + :param cloud_init_url: cloud init url + :param cloud_init_name: cloud init name + :param root_password: root password of the vm + :param force: force delete existing vms with this name + :return: + """ + return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, + amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, + root_password, force) + def clone_vm(self, vmid, name, disks, mountpoint, wait=False): """ Clone a vmachine From f75ba930bb521658cb99e9a1d60d9495ee0d2d98 Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 11:44:54 +0100 Subject: [PATCH 05/28] added postinst script. 
--- packaging/debian/debian/openvstorage-automation-lib.postinst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 packaging/debian/debian/openvstorage-automation-lib.postinst diff --git a/packaging/debian/debian/openvstorage-automation-lib.postinst b/packaging/debian/debian/openvstorage-automation-lib.postinst new file mode 100644 index 0000000..3e82486 --- /dev/null +++ b/packaging/debian/debian/openvstorage-automation-lib.postinst @@ -0,0 +1,3 @@ +#!/bin/bash + +touch /opt/OpenvStorage/ci/__init__.py From 5d7f685198431fc51ae403a391197ca88d65118b Mon Sep 17 00:00:00 2001 From: simon Date: Tue, 21 Nov 2017 11:45:40 +0100 Subject: [PATCH 06/28] -generator for setup.json file -unittest for generator --- helpers/setup_json_generator.py | 297 +++++++++++++++++++++ helpers/tests/__init__.py | 19 ++ helpers/tests/json_generator_testcase.py | 313 +++++++++++++++++++++++ 3 files changed, 629 insertions(+) create mode 100644 helpers/setup_json_generator.py create mode 100644 helpers/tests/__init__.py create mode 100644 helpers/tests/json_generator_testcase.py diff --git a/helpers/setup_json_generator.py b/helpers/setup_json_generator.py new file mode 100644 index 0000000..11de13f --- /dev/null +++ b/helpers/setup_json_generator.py @@ -0,0 +1,297 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. 
+ +import json +import socket +from ovs.lib.helpers.toolbox import Toolbox +import difflib + + +class Setup_json_generator(object): + def __init__(self): + self.json = { + 'ci': {}, + 'scenarios': [], + 'setup': { + "domains": [], "backends": [], "storagerouters": {} + } + } + + self._presets = [] + self._domains = [] + self._backends = [] + self._ips = [] + + def get_dict(self): + return self.json + + def write_to_json(self, path): + with open(path, 'w') as fp: + json.dump(self.get_dict(), indent=4, sort_keys=True, fp=fp) + + def model_scenarios(self, scenarios=None): + if not isinstance(scenarios,list) and scenarios is not None: + raise ValueError('Scenarios should be passed in a list format, not {}'.format(type(scenarios))) + if scenarios == None: + self.json['scenarios'] = ["ALL"] + else: + self.json['scenarios'] = scenarios + + def model_ci(self, grid_ip, optional_params=None): + if optional_params is None: + optional_params = {} + + try: + socket.inet_aton(grid_ip) + except TypeError: + raise ValueError('Invalid ip adress provided: {}'.format(grid_ip)) + + bool_options = {'setup': True, 'validation': True, 'scenarios': True, 'cleanup': False, 'send_to_testrail': True, + 'fail_on_failed_scenario': True} + int_options = {'setup_retries': 1, 'scenario_retries': 1} + str_options = {'version': 'fargo', 'config_manager': 'arakoon'} + + for key, value in optional_params: + if key in bool_options.keys() and isinstance(value, bool): + bool_options[key] = value + elif key in int_options.keys() and isinstance(value, int): + int_options[key] = value + elif key in str_options.keys() and isinstance(value, str): + str_options[key] = value + + ci = { + "setup": bool_options['setup'], # Required + "validation": bool_options['validation'], # Not even used + "scenarios": bool_options['scenarios'], # Optional + "cleanup": bool_options['cleanup'], # optional + "send_to_testrail": bool_options['send_to_testrail'], # Optional + "fail_on_failed_scenario": 
bool_options['fail_on_failed_scenario'], # Optional + "setup_retries": int_options['setup_retries'], # Optional + "scenario_retries": int_options['scenario_retries'], # Optional + "version": str_options['version'], + "local_hypervisor": { # enkel voor testen, niet voor setup + "type": "KVM", + "user": "root", + "password": "rooter" + }, + + "hypervisor": {}, + "config_manager": str_options['config_manager'], + "user": { + "shell": { + "username": "root", + "password": "rooter" + }, + "api": { + "username": "admin", + "password": "admin" + } + }, + "grid_ip": grid_ip + } + self.json['ci'] = ci + + def _add_hypervisor(self, machine_ip, vms=None): + if vms is None: + vms = {'vm_ip': {'name': '_name', 'role': '_role'}} + hypervisor_dict = { + "type": "KVM", + 'ip': machine_ip, + 'user': 'root', + 'password': 'rooter', + 'vms': vms + } + for i in vms.keys(): + self._ips.append(i) + self.json['ci']['hypervisor'] = hypervisor_dict + + def add_storagerouter(self, storagerouter_ip, hostname): + try: + socket.inet_aton(storagerouter_ip) + except TypeError: + raise ValueError('Invalid ip adress provided: {0}'.format(storagerouter_ip)) + if not isinstance(hostname,str): + raise ValueError('Invalid hostname provided: {0} must be a string, not {1}'.format(hostname,type(hostname))) + sr_dict = {} + sr_dict[storagerouter_ip] = {"hostname": hostname, + "domains": {}, + "disks": {}, + "vpools": {} + } + self.json['setup']['storagerouters'].update(sr_dict) + + def _add_disk_to_sr(self, storagerouter_ip, name, roles): + if not storagerouter_ip in self.json['setup']['storagerouters'].keys(): + raise ValueError('Storagerouter with IP {0} not found in json'.format(storagerouter_ip)) + d_dict = {} + d_dict[name] = {"roles": roles} + self.json['setup']['storagerouters'][storagerouter_ip]['disks'].update(d_dict) + + def _add_domain_to_sr(self, storagerouter_ip, name, recovery=False): + if not storagerouter_ip in self.json['setup']['storagerouters'].keys(): + raise ValueError('Storagerouter 
with ip {0} not found in json'.format(storagerouter_ip)) + if not name in self._domains: + raise ValueError('Invalid domain passed: {}'.format(name)) + path = self.json['setup']['storagerouters'][storagerouter_ip]['domains'] + + if recovery is False: + if 'domain_guids' not in path.keys(): + path['domain_guids'] = [name] + else: + path['domain_guids'].append(name) + else: + if 'recovery_domain_guids' not in path.keys(): + path['recovery_domain_guids'] = [name] + else: + path['recovery_domain_guids'].append(name) + + def add_domain(self, domain): + if not isinstance(domain, str): + raise ValueError("domain is no string") + self._domains.append(domain) + self.json['setup']['domains'].append(domain) + + def add_backend(self, name, domains=None, scaling='LOCAL'): + if domains is None: + domains = [] + for domain_name in domains: + if domain_name not in self._domains: + raise ValueError('Invalid domain passed: {}'.format(domain_name)) + + be_dict = {'name': name, + 'domains': {'domain_guids': domains}, + 'scaling': scaling, + 'presets': [], + 'osds': {} + } + self._backends.append(be_dict['name']) + self.json['setup']['backends'].append(be_dict) + + def _add_preset_to_backend(self, backend_name, preset_name, policies, compression='snappy', encryption=None, fragment_size=2097152): + if backend_name not in self._backends: + raise ValueError('Invalid backend passed as argument: {}'.format(backend_name)) + if encryption == None: + encryption = 'none' + preset_dict = { + 'name': preset_name, + 'compression': compression, + 'encryption': encryption, + 'policies': policies, + 'fragment_size': fragment_size, + } + self._presets.append(preset_dict['name']) + for i in range(len(self.json['setup']['backends'])): + if self.json['setup']['backends'][i]['name'] == backend_name: + self.json['setup']['backends'][i]['presets'].append(preset_dict) + # 'presets'].append(preset_dict) + + def _add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=None, 
linked_preset=None): + if backend_name not in self._backends: + raise ValueError('Invalid backend passed as argument: {}'.format(backend_name)) + + osd_dict = {} + for i in range(len(self.json['setup']['backends'])): + if self.json['setup']['backends'][i]['name'] == backend_name: + scaling = self.json['setup']['backends'][i]['scaling'] + if scaling == 'LOCAL': + if osds_on_disks is None: + raise ValueError('Osd dictionairy required') + osd_dict = osds_on_disks + elif scaling == 'GLOBAL': + if linked_backend not in self._backends: + raise ValueError("Provided backend {} not in known backends".format(linked_backend)) + if linked_preset not in self._presets: + raise ValueError("Provided preset {} not in known presets".format(linked_preset)) + osd_dict = {linked_backend: linked_preset} + + else: + print ValueError('invalid scaling ({0}) passed'.format(scaling)) + self.json['setup']['backends'][i]['osds'].update(osd_dict) + + def add_vpool(self, storagerouter_ip, vpool_name, backend_name, preset, storage_ip): + if backend_name not in self._backends: + raise ValueError("Provided backend {} not in known backends".format(backend_name)) + if preset not in self._presets: + raise ValueError('Provided preset not in known presets'.format(preset)) + + vpool_dict = {'backend_name': backend_name, + 'preset': preset, + 'storage_ip': storage_ip, + 'proxies':1, + 'fragment_cache': { + 'strategy': {'cache_on_read': False, 'cache_on_write': False}, + 'location': "disk" + }, + 'block_cache': { + 'strategy': {'cache_on_read': False, 'cache_on_write': False}, + 'location': "disk" + }, + 'storagedriver': + { + "sco_size": 4, + "cluster_size": 4, + "volume_write_buffer": 512, + "strategy": "none", + "global_write_buffer": 20, + "global_read_buffer": 0, + "deduplication": "non_dedupe", + "dtl_transport": "tcp", + "dtl_mode": "sync" + } + } + self.json['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict + + def _change_cache(self, storagerouter_ip, vpool, 
block_cache=True, fragment_cache=True, on_read=True, on_write=True): + try: + vpool = self.json['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool] + if block_cache is True: + vpool['block_cache']['strategy']['cache_on_read'] = on_read + vpool['block_cache']['strategy']['cache_on_write'] = on_write + if fragment_cache is True: + vpool['fragment_cache']['strategy']['cache_on_read'] = on_read + vpool['fragment_cache']['strategy']['cache_on_write'] = on_write + + except KeyError as e: + raise ValueError('Vpool not found with exception {0}'.format(e)) + + @classmethod + def reformat_json(cls, in_path, out_path=None): + if not isinstance(in_path, str) or (out_path is not None and not isinstance(out_path, str)): + raise ValueError('path should be string type, got {}'.format(type(in_path))) + if out_path is None: + out_path = in_path.rstrip('.json') + '_reformatted.json' + + with open(in_path) as json_data: + d = json.load(json_data) + with open(out_path, 'w') as fp: + json.dump(d, fp=fp, indent=4, sort_keys=True) + + @classmethod + def compare_files(cls, file1, file2): + print 'comparing {} and {}'.format(file1, file2) + with open(file1) as fh1, open(file2) as fh2: + lines_file1 = fh1.readlines() + lines_file2 = fh2.readlines() + + d = difflib.Differ() + diff = d.compare(lines_file1, lines_file2) + for i in diff: + if i[0] != ' ': + print i + + + + diff --git a/helpers/tests/__init__.py b/helpers/tests/__init__.py new file mode 100644 index 0000000..8ae97b6 --- /dev/null +++ b/helpers/tests/__init__.py @@ -0,0 +1,19 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. 
+# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. + +""" +Init +""" diff --git a/helpers/tests/json_generator_testcase.py b/helpers/tests/json_generator_testcase.py new file mode 100644 index 0000000..71f17d0 --- /dev/null +++ b/helpers/tests/json_generator_testcase.py @@ -0,0 +1,313 @@ +import unittest +from ci.api_lib.helpers.setup_json_generator import Setup_json_generator +import json +import difflib +import pprint + +class Json_generator_testcase(unittest.TestCase): + + def __init__(self,*args, **kwargs): + super(Json_generator_testcase, self).__init__(*args, **kwargs) + self.generator = Setup_json_generator() + self.ip = '10.100.100.100' + + def test_structure(self): + self.assertEquals(len((self.generator).get_dict().keys()), 3) + + def test_model_ci(self): + self.generator.model_ci(grid_ip=self.ip) + self.assertTrue(isinstance(self.generator.get_dict()['ci']['setup'],bool)) + + def test_model_scenarios(self): + self.generator.model_scenarios() + self.assertEquals(self.generator.get_dict()['scenarios'],['ALL']) + self.generator.model_scenarios(['ABC','def']) + self.assertEquals(self.generator.get_dict()['scenarios'],['ABC', 'def']) + + def test_add_domain(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + self.assertEquals(len(self.generator.get_dict()['setup']['domains']), 2) + with self.assertRaises(ValueError): + self.generator.add_domain(7) + + def test_add_storagerouter(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + with self.assertRaises(ValueError): + self.generator.add_storagerouter(storagerouter_ip=100, hostname='hostname') + 
with self.assertRaises(ValueError): + self.generator.add_storagerouter(storagerouter_ip=self.ip, hostname=7) + self.generator.add_storagerouter(storagerouter_ip=self.ip,hostname='hostname') + self.assertTrue(self.ip in self.generator.get_dict()['setup']['storagerouters'].keys()) + + self.generator._add_disk_to_sr(storagerouter_ip=self.ip,name='disk1',roles=['role1','role2']) + self.assertTrue('disk1' in self.generator.get_dict()['setup']['storagerouters'][self.ip]['disks']) + self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['disks']['disk1']['roles']), 2) + + self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain1') + self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain1',recovery=True) + self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['domain_guids']), 1) + self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['recovery_domain_guids']), 1) + + self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain2') + self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain2',recovery=True) + self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['domain_guids']), 2) + self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['recovery_domain_guids']), 2) + + def test_add_backend(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + + self.generator.add_backend(name='mybackend', domains=['domain1']) + self.assertItemsEqual(self.generator.get_dict()['setup']['backends'][0].keys(),['name','domains','scaling','presets','osds']) + self.generator.add_backend(name='mybackend_02', domains=['domain1'],scaling='GLOBAL') + self.assertItemsEqual(self.generator.get_dict()['setup']['backends'][1].keys(),['name','domains','scaling','presets','osds']) + + 
self.generator._add_preset_to_backend(backend_name='mybackend_02',preset_name='mypreset',policies=[1,2,2,1]) + self.assertEqual(self.generator.get_dict()['setup']['backends'][1]['name'],'mybackend_02') + with self.assertRaises(ValueError): + self.generator._add_preset_to_backend(backend_name='non-existing_backend',preset_name='mypreset',policies=[1,2,2,1]) + + self.generator._add_osd_to_backend(backend_name='mybackend',osds_on_disks={self.ip: {'vdb': 2}}) + self.assertEqual(self.generator.get_dict()['setup']['backends'][0]['osds'][self.ip]['vdb'],2) + with self.assertRaises(ValueError): + self.generator._add_osd_to_backend(backend_name='mybackend_02',osds_on_disks={self.ip: {'vdb': 2}}) + self.generator._add_osd_to_backend(backend_name='mybackend_02',linked_backend='mybackend',linked_preset='mypreset') + self.assertEqual(self.generator.get_dict()['setup']['backends'][1]['osds']['mybackend'],'mypreset') + + def test_add_vpool(self): + vpoolname = 'vpool01' + self.generator.add_domain('domain1') + self.generator.add_storagerouter(storagerouter_ip=self.ip, hostname='hostname') + self.generator.add_backend(name='mybackend', domains=['domain1']) + self.generator._add_preset_to_backend(backend_name='mybackend',preset_name='mypreset',policies=[1,2,2,1]) + with self.assertRaises(ValueError): + self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='non-existing_backend',preset='mypreset',storage_ip=self.ip) + with self.assertRaises(ValueError): + self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='mybackend',preset='non-existing_preset',storage_ip=self.ip) + + self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='mybackend',preset='mypreset',storage_ip=self.ip) + self.assertTrue(vpoolname in self.generator.get_dict()['setup']['storagerouters'][self.ip]['vpools'].keys()) + self.assertTrue('storagedriver' in 
self.generator.get_dict()['setup']['storagerouters'][self.ip]['vpools'][vpoolname].keys()) + + def test_full_flow(self): + self.generator.model_ci('10.100.199.171') + self.generator._add_hypervisor(machine_ip='10.100.69.222', vms = {'10.100.199.171': {'name': 'ubuntu16.04-ovsnode01-setup1', + 'role': 'COMPUTE'}, + '10.100.199.172': {'name': 'ubuntu16.04-ovsnode02-setup1', + 'role': 'VOLDRV'}, + '10.100.199.173': {'name': 'ubuntu16.04-ovsnode03-setup1', + 'role': 'VOLDRV'} + }) + + self.generator.model_scenarios() + self.generator.add_domain('Roubaix') + self.generator.add_domain('Gravelines') + self.generator.add_domain('Strasbourg') + + #### add backends #### + + self.generator.add_backend(name='mybackend', domains=['Roubaix']) + self.generator._add_osd_to_backend(backend_name='mybackend',osds_on_disks={'10.100.199.171': {'sde': 2,'sdf': 2}, + '10.100.199.172': {'sde': 2,'sdf': 2}, + '10.100.199.173': {'sde': 2, 'sdf': 2}}) + self.generator._add_preset_to_backend(backend_name='mybackend',preset_name='mypreset',policies=[[1,2,2,1]]) + + self.generator.add_backend(name='mybackend02',domains=['Gravelines']) + self.generator._add_preset_to_backend(backend_name='mybackend02',preset_name='mypreset',policies=[[1,2,2,1]]) + self.generator._add_osd_to_backend(backend_name='mybackend02',osds_on_disks={'10.100.199.171': {'sdg': 2}, + '10.100.199.172': {'sdg': 2}, + '10.100.199.173': {'sdg': 2}}) + + self.generator.add_backend(name='mybackend-global',domains=['Roubaix','Gravelines','Strasbourg'],scaling='GLOBAL') + self.generator._add_preset_to_backend(backend_name='mybackend-global',preset_name='mypreset',policies=[[1,2,2,1]]) + self.generator._add_osd_to_backend(backend_name='mybackend-global',linked_backend='mybackend',linked_preset='mypreset') + self.generator._add_osd_to_backend(backend_name='mybackend-global',linked_backend='mybackend02',linked_preset='mypreset') + + #### add storagerouter 1 + + self.generator.add_storagerouter(storagerouter_ip='10.100.199.171', 
hostname='ovs-node-1-1604') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Roubaix') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Gravelines',recovery=True) + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Strasbourg',recovery=True) + + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sda',roles=['WRITE','DTL']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sdb',roles=['DB']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sdc',roles=['SCRUB']) + + self.generator.add_vpool(storagerouter_ip='10.100.199.171', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.171') + self.generator._change_cache(storagerouter_ip='10.100.199.171',vpool='myvpool01',block_cache=True,fragment_cache=False,on_write=False) + self.generator._change_cache(storagerouter_ip='10.100.199.171',vpool='myvpool01',fragment_cache=True,block_cache=False,on_read=False,on_write=True) + + + #### add storagerouter2 + + self.generator.add_storagerouter(storagerouter_ip='10.100.199.172', hostname='ovs-node-2-1604') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Gravelines') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Roubaix',recovery=True) + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Strasbourg',recovery=True) + + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sda',roles=['WRITE','DTL']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sdb',roles=['DB']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sdc',roles=['SCRUB']) + + self.generator.add_vpool(storagerouter_ip='10.100.199.172', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.172') + 
self.generator._change_cache(storagerouter_ip='10.100.199.172',vpool='myvpool01',fragment_cache=True,block_cache=True,on_write=False, on_read=True) + + + + #### add storagerouter 3 + + self.generator.add_storagerouter(storagerouter_ip='10.100.199.173', hostname='ovs-node-3-1604') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Gravelines') + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Roubaix',recovery=True) + self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Strasbourg',recovery=True) + + + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sda',roles=['WRITE','DTL']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sdb',roles=['DB']) + self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sdc',roles=['SCRUB']) + self.generator.add_vpool(storagerouter_ip='10.100.199.173', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.173') + + + expected_output = {u'ci': {u'cleanup': False, + u'config_manager': u'arakoon', + u'fail_on_failed_scenario': True, + u'grid_ip': u'10.100.199.171', + u'hypervisor': {u'ip': u'10.100.69.222', + u'password': u'rooter', + u'type': u'KVM', + u'user': u'root', + u'vms': {u'10.100.199.171': {u'name': u'ubuntu16.04-ovsnode01-setup1', + u'role': u'COMPUTE'}, + u'10.100.199.172': {u'name': u'ubuntu16.04-ovsnode02-setup1', + u'role': u'VOLDRV'}, + u'10.100.199.173': {u'name': u'ubuntu16.04-ovsnode03-setup1', + u'role': u'VOLDRV'}} + }, + u'local_hypervisor': {u'password': u'rooter', + u'type': u'KVM', + u'user': u'root'}, + u'scenario_retries': 1, + u'scenarios': True, + u'send_to_testrail': True, + u'setup': True, + u'setup_retries': 1, + u'user': {u'api': {u'password': u'admin', u'username': u'admin'}, + u'shell': {u'password': u'rooter', u'username': u'root'}}, + u'validation': True, + u'version': u'fargo'}, + u'scenarios': [u'ALL'], + u'setup': {u'backends': 
[{u'domains': {u'domain_guids': [u'Roubaix']}, + u'name': u'mybackend', + u'osds': {u'10.100.199.171': {u'sde': 2, u'sdf': 2}, + u'10.100.199.172': {u'sde': 2, u'sdf': 2}, + u'10.100.199.173': {u'sde': 2, u'sdf': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Gravelines']}, + u'name': u'mybackend02', + u'osds': {u'10.100.199.171': {u'sdg': 2}, + u'10.100.199.172': {u'sdg': 2}, + u'10.100.199.173': {u'sdg': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']}, + u'name': u'mybackend-global', + u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'GLOBAL'}], + u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'], + u'storagerouters': {u'10.100.199.171': {u'disks': {u'sda': {u'roles': [u'WRITE', + u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Roubaix'], + u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']}, + u'hostname': u'ovs-node-1-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': True}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'10.100.199.171', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + 
u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}, + u'10.100.199.172': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-2-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'10.100.199.172', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}, + u'10.100.199.173': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-3-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'10.100.199.173', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}} + } + } + } + + + 
self.assertDictEqual(self.generator.get_dict(),expected_output) + + +if __name__ == '__main__': + unittest.main() From 49a7fc0865b9c0fb87ab1803da9d68d70d16e6bb Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 13:58:14 +0100 Subject: [PATCH 07/28] Added information --- helpers/hypervisor/apis/kvm/sdk.py | 23 ++++++++++++++++++-- helpers/hypervisor/hypervisors/vmware.py | 27 ++++++++++++------------ 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/helpers/hypervisor/apis/kvm/sdk.py b/helpers/hypervisor/apis/kvm/sdk.py index 10fd4bc..ac19e87 100644 --- a/helpers/hypervisor/apis/kvm/sdk.py +++ b/helpers/hypervisor/apis/kvm/sdk.py @@ -674,8 +674,28 @@ def create_vm(self, name, vcpus, ram, disks, cdrom_iso=None, os_type=None, os_va print ' '.join(command+options) raise RuntimeError(msg) - def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): + """ + Create vm from cloud init + :param name: Name of the vm + :param vcpus: amount of vcpus + :param ram: amount of ram (MB) + :param boot_disk_size: size of the boot disks (notation xGB) + :param bridge: network bridge name + :param ip: ip of the vm + :param netmask: netmask + :param gateway: gateway + :param nameserver: dns ip + :param amount_disks: amount of extra disks + :param size: size of the extra disks (notation xGB) + :param mountpoint: where the extra disks should be created + :param cloud_init_url: cloud init url + :param cloud_init_name: vmdk template name + :param root_password: root password of the vm + :param force: remove vm with the same name or used disks + :return: + """ template_directory = '/var/lib/libvirt/images' vmdk_file = "{0}/{1}.vmdk".format(template_directory, cloud_init_name) @@ -692,7 +712,6 @@ 
def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gatew meta_data = "{0}/meta-data".format(vm_directory) ci_iso = "{0}/{1}.iso".format(vm_directory, name) boot_disk = "{0}/{1}.qcow2".format(vm_directory, name) - boot_disk_size = "30G" meta_data_lines = [ 'instance-id: {0}'.format(uuid.uuid1()), diff --git a/helpers/hypervisor/hypervisors/vmware.py b/helpers/hypervisor/hypervisors/vmware.py index db8df01..dc68f39 100644 --- a/helpers/hypervisor/hypervisors/vmware.py +++ b/helpers/hypervisor/hypervisors/vmware.py @@ -51,28 +51,29 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T return task_info.info.result.value return None - def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): """ - create vm from cloud init - :param name: name of the vm - :param vcpus: amount of vCPUs - :param ram: amount of ram - :param bridge: network bridge - :param ip: ip address + Create vm from cloud init + :param name: Name of the vm + :param vcpus: amount of vcpus + :param ram: amount of ram (MB) + :param boot_disk_size: size of the boot disks (notation xGB) + :param bridge: network bridge name + :param ip: ip of the vm :param netmask: netmask :param gateway: gateway - :param nameserver: nameserver + :param nameserver: dns ip :param amount_disks: amount of extra disks - :param size: the size of the extra disks - :param mountpoint: mountpoint where the extra disks will be stored + :param size: size of the extra disks (notation xGB) + :param mountpoint: where the extra disks should be created :param cloud_init_url: cloud init url - :param cloud_init_name: cloud init name + :param cloud_init_name: vmdk template name :param root_password: root password of the vm - 
:param force: force delete existing vms with this name + :param force: remove vm with the same name or used disks :return: """ - return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, + return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, root_password, force) From 3cac118845df3782eabb852c8e0ae31b22e4a279 Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 14:03:28 +0100 Subject: [PATCH 08/28] Fixed the hypervisor method. Removed the vmware sdk call. (return None) --- helpers/hypervisor/hypervisors/kvm.py | 27 ++++++++++++------------ helpers/hypervisor/hypervisors/vmware.py | 4 +--- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/helpers/hypervisor/hypervisors/kvm.py b/helpers/hypervisor/hypervisors/kvm.py index 3067bf1..6a43231 100644 --- a/helpers/hypervisor/hypervisors/kvm.py +++ b/helpers/hypervisor/hypervisors/kvm.py @@ -46,28 +46,29 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T _ = ip, wait # For compatibility purposes only return self.sdk.create_vm_from_template(name, source_vm, disks, mountpoint) - def create_vm_from_cloud_init(self, name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, amount_disks, size, + def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, root_password, force=False): """ - create vm from cloud init - :param name: name of the vm - :param vcpus: amount of vCPUs - :param ram: amount of ram - :param bridge: network bridge - :param ip: ip address + Create vm from cloud init + :param name: Name of the vm + :param vcpus: amount of vcpus + :param ram: amount of ram (MB) + :param boot_disk_size: size of the boot disks (notation xGB) + :param bridge: network bridge name 
+ :param ip: ip of the vm :param netmask: netmask :param gateway: gateway - :param nameserver: nameserver + :param nameserver: dns ip :param amount_disks: amount of extra disks - :param size: the size of the extra disks - :param mountpoint: mountpoint where the extra disks will be stored + :param size: size of the extra disks (notation xGB) + :param mountpoint: where the extra disks should be created :param cloud_init_url: cloud init url - :param cloud_init_name: cloud init name + :param cloud_init_name: vmdk template name :param root_password: root password of the vm - :param force: force delete existing vms with this name + :param force: remove vm with the same name or used disks :return: """ - return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, bridge, ip, netmask, gateway, nameserver, + return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, root_password, force) diff --git a/helpers/hypervisor/hypervisors/vmware.py b/helpers/hypervisor/hypervisors/vmware.py index dc68f39..75ecf25 100644 --- a/helpers/hypervisor/hypervisors/vmware.py +++ b/helpers/hypervisor/hypervisors/vmware.py @@ -73,9 +73,7 @@ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip :param force: remove vm with the same name or used disks :return: """ - return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, - amount_disks, size, mountpoint, cloud_init_url, cloud_init_name, - root_password, force) + return None def clone_vm(self, vmid, name, disks, mountpoint, wait=False): """ From 9918903cce0227a28d4fb999c521c16b799bd113 Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 15:06:42 +0100 Subject: [PATCH 09/28] Fixed the remarks. 
--- ci/__init__.py | 19 ++++++++++++++ helpers/hypervisor/apis/kvm/sdk.py | 25 ++++++++++--------- helpers/hypervisor/hypervisors/vmware.py | 2 +- .../openvstorage-automation-lib.install | 1 + .../openvstorage-automation-lib.postinst | 3 --- 5 files changed, 34 insertions(+), 16 deletions(-) create mode 100644 ci/__init__.py delete mode 100644 packaging/debian/debian/openvstorage-automation-lib.postinst diff --git a/ci/__init__.py b/ci/__init__.py new file mode 100644 index 0000000..8ae97b6 --- /dev/null +++ b/ci/__init__.py @@ -0,0 +1,19 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. 
+ +""" +Init +""" diff --git a/helpers/hypervisor/apis/kvm/sdk.py b/helpers/hypervisor/apis/kvm/sdk.py index ac19e87..e01717b 100644 --- a/helpers/hypervisor/apis/kvm/sdk.py +++ b/helpers/hypervisor/apis/kvm/sdk.py @@ -783,14 +783,15 @@ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip self.ssh_client.run(["genisoimage", "-output", ci_iso, "-volid", "cidata", "-joliet", "-r", user_data, meta_data]) # Create extra disks - extra_disks = [] + all_disks = [{'mountpoint': boot_disk, "format": "qcow2", "bus": "virtio"}] + if amount_disks > 0 and size > 0: if not self.ssh_client.dir_exists(mountpoint): raise Exception("Directory {0} doesn't exists.".format(mountpoint)) for i in xrange(1, amount_disks+1): - disk_path = "%s/%s_%02d.qcow2" % (mountpoint, name, i,) - exists, used_disk, vm_name = self._check_disks_in_use(disk_path) + disk_path = "{0}/{1}_{2:02d}.qcow2".format(mountpoint, name, i,) + exists, used_disk, vm_name = self._check_disks_in_use([disk_path]) disk_exists_filesystem = self.ssh_client.file_exists(disk_path) if disk_exists_filesystem and exists: raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, vm_name)) @@ -798,19 +799,14 @@ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip self.ssh_client.file_delete(disk_path) self.ssh_client.run(['qemu-img', 'create', '-f', 'qcow2', disk_path, size]) - extra_disks.append(disk_path) - - all_disks = [{'mountpoint': boot_disk, "format": "qcow2", "bus": "virtio"}] - - for extra_disk in extra_disks: - all_disks.append({'mountpoint': extra_disk, "format": "qcow2", "bus": "virtio"}) + all_disks.append({'mountpoint': disk_path, "format": "qcow2", "bus": "virtio"}) self.create_vm(name=name, vcpus=vcpus, ram=ram, disks=all_disks, cdrom_iso=ci_iso, networks=[{"bridge": bridge, "model": "virtio"}], start=True) def _check_disks_in_use(self, disk_paths): """ - check if disks are in used + Check if disks are in used :param disks: list of disk paths :type 
disks: list :return: bool @@ -823,8 +819,13 @@ def _check_disks_in_use(self, disk_paths): continue used_disk = disk.find('source').get('file') if used_disk in disk_paths: - return True, used_disk, dom_info.find('name').text - raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, dom_info.find('name').text)) + try: + return True, used_disk, dom_info.find('name').text + except AttributeError as ex: + msg = "Error during checking of VM's disks. Got {0}".format(str(ex)) + logger.exception(msg) + return True, used_disk, 'Unknown vm name' + return False, '', '' @staticmethod diff --git a/helpers/hypervisor/hypervisors/vmware.py b/helpers/hypervisor/hypervisors/vmware.py index 75ecf25..79a22f2 100644 --- a/helpers/hypervisor/hypervisors/vmware.py +++ b/helpers/hypervisor/hypervisors/vmware.py @@ -73,7 +73,7 @@ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip :param force: remove vm with the same name or used disks :return: """ - return None + raise NotImplementedError def clone_vm(self, vmid, name, disks, mountpoint, wait=False): """ diff --git a/packaging/debian/debian/openvstorage-automation-lib.install b/packaging/debian/debian/openvstorage-automation-lib.install index 283228f..cc846c8 100644 --- a/packaging/debian/debian/openvstorage-automation-lib.install +++ b/packaging/debian/debian/openvstorage-automation-lib.install @@ -1,3 +1,4 @@ +ci opt/OpenvStorage/ci helpers opt/OpenvStorage/ci/api_lib/ setup opt/OpenvStorage/ci/api_lib/ remove opt/OpenvStorage/ci/api_lib/ diff --git a/packaging/debian/debian/openvstorage-automation-lib.postinst b/packaging/debian/debian/openvstorage-automation-lib.postinst deleted file mode 100644 index 3e82486..0000000 --- a/packaging/debian/debian/openvstorage-automation-lib.postinst +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -touch /opt/OpenvStorage/ci/__init__.py From f668ed5e4d9e0ec5d09f23ee6b201c8aeedca97c Mon Sep 17 00:00:00 2001 From: jeroenmaelbrancke Date: Tue, 21 Nov 2017 15:11:08 
+0100 Subject: [PATCH 10/28] Fixed the remark. --- packaging/debian/debian/openvstorage-automation-lib.install | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/debian/openvstorage-automation-lib.install b/packaging/debian/debian/openvstorage-automation-lib.install index cc846c8..b6ce271 100644 --- a/packaging/debian/debian/openvstorage-automation-lib.install +++ b/packaging/debian/debian/openvstorage-automation-lib.install @@ -1,4 +1,4 @@ -ci opt/OpenvStorage/ci +ci opt/OpenvStorage/ci/ helpers opt/OpenvStorage/ci/api_lib/ setup opt/OpenvStorage/ci/api_lib/ remove opt/OpenvStorage/ci/api_lib/ From c8d9db501526830bc52d2dbb34efc105d8d81ccd Mon Sep 17 00:00:00 2001 From: simon Date: Thu, 23 Nov 2017 16:58:17 +0100 Subject: [PATCH 11/28] Added unlink backend method --- remove/backend.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/remove/backend.py b/remove/backend.py index ca63e78..0aa4fc2 100644 --- a/remove/backend.py +++ b/remove/backend.py @@ -336,3 +336,39 @@ def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BAC BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" .format(albabackend_name, globalbackend_name)) return task_result[0] + + + @classmethod + #@required_backend + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + """ + Unlink a LOCAL backend from a GLOBAL backend + + :param globalbackend_name: name of a GLOBAL alba backend + :type globalbackend_name: str + :param albabackend_name: name of a backend to unlink + :type albabackend_name: str + :param timeout: timeout counter in seconds + :type timeout: int + :return: + """ + data = { + "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) + } + + task_guid = cls.api.post( + api='/alba/backends/{0}/unlink_alba_backends' + .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), 
+ data=data + ) + + task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + if not task_result[0]: + error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( + albabackend_name, globalbackend_name, task_result[1]) + BackendRemover.LOGGER.error(error_msg) + raise RuntimeError(error_msg) + else: + BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" + .format(albabackend_name, globalbackend_name)) + return task_result[0] From 0d7d6b1b73ea767ce6347254ff5c981d32efce93 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 5 Dec 2017 17:06:28 +0100 Subject: [PATCH 12/28] setup + cleanup --- helpers/albanode.py | 4 +- helpers/backend.py | 5 +- helpers/ci_constants.py | 101 ++++ helpers/disk.py | 2 +- helpers/fstab.py | 71 +-- helpers/setup_json_generator.py | 297 ---------- helpers/setupjsongenerator.py | 675 +++++++++++++++++++++++ helpers/storagerouter.py | 8 +- helpers/tests/json_generator_testcase.py | 313 ----------- helpers/tests/jsongeneratortestcase.py | 369 +++++++++++++ helpers/vdisk.py | 3 +- remove/backend.py | 2 +- remove/roles.py | 47 +- remove/vdisk.py | 6 +- remove/vpool.py | 2 +- setup/backend.py | 8 +- setup/domain.py | 5 +- setup/roles.py | 4 +- setup/vdisk.py | 6 +- setup/vpool.py | 4 +- validate/backend.py | 9 +- 21 files changed, 1250 insertions(+), 691 deletions(-) create mode 100644 helpers/ci_constants.py delete mode 100644 helpers/setup_json_generator.py create mode 100644 helpers/setupjsongenerator.py delete mode 100644 helpers/tests/json_generator_testcase.py create mode 100644 helpers/tests/jsongeneratortestcase.py diff --git a/helpers/albanode.py b/helpers/albanode.py index 8ed76a0..453a234 100644 --- a/helpers/albanode.py +++ b/helpers/albanode.py @@ -13,10 +13,11 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants + from ovs.dal.hybrids.albanode import AlbaNode from ovs.dal.lists.albanodelist import AlbaNodeList from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants class AlbaNodeHelper(CIConstants): @@ -32,6 +33,7 @@ def _map_alba_nodes(cls): """ Will map the alba_node_id with its guid counterpart and return the map dict """ + mapping = {} options = { 'contents': 'node_id,_relations', diff --git a/helpers/backend.py b/helpers/backend.py index b57378e..6733923 100644 --- a/helpers/backend.py +++ b/helpers/backend.py @@ -14,12 +14,13 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. -from ci.scenario_helpers.ci_constants import CIConstants + +from ovs.dal.hybrids.albabackend import AlbaBackend from ovs.dal.lists.albabackendlist import AlbaBackendList from ovs.dal.lists.backendlist import BackendList from ovs.dal.lists.backendtypelist import BackendTypeList -from ovs.dal.hybrids.albabackend import AlbaBackend from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.exceptions import PresetNotFoundError, AlbaBackendNotFoundError diff --git a/helpers/ci_constants.py b/helpers/ci_constants.py new file mode 100644 index 0000000..7b452fd --- /dev/null +++ b/helpers/ci_constants.py @@ -0,0 +1,101 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. 
+# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. +import json +from ci.api_lib.helpers.api import OVSClient + + +CONFIG_LOC = "/opt/OpenvStorage/ci/config/setup.json" +TEST_SCENARIO_LOC = "/opt/OpenvStorage/ci/scenarios/" +SETTINGS_LOC = "/opt/OpenvStorage/ci/config/settings.json" +TESTTRAIL_LOC = "/opt/OpenvStorage/ci/config/testtrail.json" + + +class CIConstants(object): + """ + Collection of multiple constants and constant related instances + """ + + FIO_BIN = {'url': 'http://www.include.gr/fio.bin.latest', 'location': '/tmp/fio.bin.latest'} + FIO_BIN_EE = {'url': 'http://www.include.gr/fio.bin.latest.ee', 'location': '/tmp/fio.bin.latest'} + + with open(CONFIG_LOC, 'r') as JSON_CONFIG: + SETUP_CFG = json.load(JSON_CONFIG) + + with open(SETTINGS_LOC, 'r') as JSON_SETTINGS: + SETTINGS = json.load(JSON_SETTINGS) + + DATA_TEST_CASES = [(0, 100), (30, 70), (40, 60), (50, 50), (70, 30), (100, 0)] # read write patterns to test (read, write) + + CLOUD_INIT_DATA = { + 'script_loc': 'https://raw.githubusercontent.com/kinvaris/cloud-init/master/create-config-drive', + 'script_dest': '/tmp/cloud_init_script.sh', + 'user-data_loc': '/tmp/user-data-migrate-test', + 'config_dest': '/tmp/cloud-init-config-migrate-test' + } + + # collect details about parent hypervisors + PARENT_HYPERVISOR_INFO = SETUP_CFG['ci'].get('hypervisors') + + # hypervisor details + HYPERVISOR_TYPE = SETUP_CFG['ci']['local_hypervisor']['type'] + HYPERVISOR_USER = SETUP_CFG['ci']['local_hypervisor']['user'] + HYPERVISOR_PASSWORD = SETUP_CFG['ci']['local_hypervisor']['password'] + + HYPERVISOR_INFO = {'type': HYPERVISOR_TYPE, + 'user': HYPERVISOR_USER, + 'password': HYPERVISOR_PASSWORD} + + VM_USERNAME = 'root' # vm credentials & details + VM_PASSWORD = 'rooter' + VM_VCPUS = 4 + VM_VRAM = 1024 # In MB + VM_OS_TYPE = 'ubuntu16.04' + + VM_WAIT_TIME = 300 # wait time before timing out on the vm install in seconds + + VDISK_THREAD_LIMIT = 5 # Each 
monitor thread queries x amount of vdisks + FIO_VDISK_LIMIT = 50 # Each fio uses x disks + + IO_REFRESH_RATE = 5 # Refresh rate used for polling IO + AMOUNT_TO_WRITE = 1 * 1024 ** 3 # Amount of data to RW to produce IO + + HA_TIMEOUT = 300 + + + class classproperty(property): + def __get__(self, cls, owner): + return classmethod(self.fget).__get__(None, owner)() + + @classproperty + def api(cls): + return OVSClient(cls.SETUP_CFG['ci']['grid_ip'], + cls.SETUP_CFG['ci']['user']['api']['username'], + cls.SETUP_CFG['ci']['user']['api']['password']) + + + def __init__(self): + super(CIConstants, self).__init__() + + + @classmethod + def get_shell_user(cls): + """ + Gets the user configured within the setup + :return: dict with the users credentials + :rtype: dict + """ + return {'username': cls.SETUP_CFG['ci']['user']['shell']['username'], + 'password': cls.SETUP_CFG['ci']['user']['shell']['password']} \ No newline at end of file diff --git a/helpers/disk.py b/helpers/disk.py index e215429..c589f0c 100644 --- a/helpers/disk.py +++ b/helpers/disk.py @@ -87,8 +87,8 @@ def get_roles_from_disk(storagerouter_guid, disk_name): :return: list of roles of all partitions on a certain disk :rtype: list """ - disk = DiskHelper.get_disk_by_diskname(storagerouter_guid, disk_name) + roles_on_disk = [] if disk: for diskpartition in disk.partitions: diff --git a/helpers/fstab.py b/helpers/fstab.py index efff943..40dac4c 100644 --- a/helpers/fstab.py +++ b/helpers/fstab.py @@ -14,6 +14,9 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
import inspect +from ovs.extensions.generic.sshclient import SSHClient +from ovs.extensions.generic.system import System + class FstabHelper(): """ @@ -43,6 +46,9 @@ def __init__(self, device, mountpoint, filesystem, options, d=0, p=0): def __eq__(self, o): return str(self) == str(o) + def __ne__(self, o): + return str(self) != str(o) + def __str__(self): return "{} {} {} {} {} {}".format(self.device, self.mountpoint, self.filesystem, self.options, self.d, self.p) @@ -70,11 +76,19 @@ def get(self, item): _path = DEFAULT_PATH - def __init__(cls, path=None): + def __init__(self, path=None, client=None): + """ + + :param path: path of the fstab file + :type path: str + """ if path: - cls._path = path + self._path = path else: - cls._path = cls.DEFAULT_PATH + self._path = self.DEFAULT_PATH + if client is None: + client = SSHClient(System.get_my_storagerouter(), username='root') + self.client = client @staticmethod @@ -87,58 +101,48 @@ def _hydrate_entry(line): """ return FstabHelper.Entry(*filter(lambda x: x not in ('',' ', None), str(line).strip("\n").split(" "))) - @classmethod - def get_entry_by_attr(cls, attr, value): + def get_entry_by_attr(self, attr, value): """ - Returns an entry with where a attr has a specific value + Returns an entry with where an attr has a specific value :param attr: attribute from the entry :param value: value that the attribute should have :return: """ - entries = [] - with open(cls._path, 'r') as fh: - for line in fh: - try: - if not line.startswith("#") and line.strip() is not '': - entries.append(cls._hydrate_entry(line)) - except ValueError: - pass - + for line in self.client.file_read(self._path).strip().splitlines(): + try: + if not line.startswith("#") and line.strip() is not '': + entries.append(self._hydrate_entry(line)) + except ValueError: + pass for entry in entries: e_attr = entry.get(attr) if e_attr == value: return entry return None - @classmethod - def remove_entry(cls, entry): + def remove_entry(self, entry): """ Removes 
a line from fstab :param entry:entry object :return: """ - with open(cls._path, 'r+') as fh: - d = fh.readlines() - fh.seek(0) - for line in d: - if line.strip() != entry and not line.startswith('#'): - fh.write(line) - fh.truncate() - - @classmethod - def remove_by_mountpoint(cls, mountpoint): + lines = self.client.file_read(self._path).strip().splitlines() + lines = [line for line in lines if not line.startswith('#') and self._hydrate_entry(line) != entry] + self.client.file_write(self._path, '\n'.join(lines)) + + def remove_by_mountpoint(self, mountpoint, client=None): """ Removes an entry by specific mountpoint :param mountpoint: mountpoint :return: """ - entry = cls.get_entry_by_attr('mountpoint', mountpoint) + + entry = self.get_entry_by_attr('mountpoint', mountpoint) if entry: - cls.remove_entry(entry) + self.remove_entry(entry) - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, dump=None, pass_=None): + def add(self, device, mountpoint, filesystem, options=None, dump=None, pass_=None): """ Adds a entry based on supplied params :param device: devicename eg /dev/sda @@ -149,5 +153,6 @@ def add(cls, device, mountpoint, filesystem, options=None, dump=None, pass_=None :param pass_: order to check filesystem at reboot time :return: """ - with open(cls._path, 'a+') as fh: - fh.write(str(FstabHelper.Entry(device, mountpoint, filesystem, options, dump))+'\n') + lines = self.client.file_read(self._path).strip().splitlines() + lines.append(str(FstabHelper.Entry(device, mountpoint, filesystem, options, dump))) + self.client.file_write(self._path, '\n'.join(lines)) diff --git a/helpers/setup_json_generator.py b/helpers/setup_json_generator.py deleted file mode 100644 index 11de13f..0000000 --- a/helpers/setup_json_generator.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (C) 2016 iNuron NV -# -# This file is part of Open vStorage Open Source Edition (OSE), -# as available from -# -# http://www.openvstorage.org and -# http://www.openvstorage.com. 
-# -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) -# as published by the Free Software Foundation, in version 3 as it comes -# in the LICENSE.txt file of the Open vStorage OSE distribution. -# -# Open vStorage is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY of any kind. - -import json -import socket -from ovs.lib.helpers.toolbox import Toolbox -import difflib - - -class Setup_json_generator(object): - def __init__(self): - self.json = { - 'ci': {}, - 'scenarios': [], - 'setup': { - "domains": [], "backends": [], "storagerouters": {} - } - } - - self._presets = [] - self._domains = [] - self._backends = [] - self._ips = [] - - def get_dict(self): - return self.json - - def write_to_json(self, path): - with open(path, 'w') as fp: - json.dump(self.get_dict(), indent=4, sort_keys=True, fp=fp) - - def model_scenarios(self, scenarios=None): - if not isinstance(scenarios,list) and scenarios is not None: - raise ValueError('Scenarios should be passed in a list format, not {}'.format(type(scenarios))) - if scenarios == None: - self.json['scenarios'] = ["ALL"] - else: - self.json['scenarios'] = scenarios - - def model_ci(self, grid_ip, optional_params=None): - if optional_params is None: - optional_params = {} - - try: - socket.inet_aton(grid_ip) - except TypeError: - raise ValueError('Invalid ip adress provided: {}'.format(grid_ip)) - - bool_options = {'setup': True, 'validation': True, 'scenarios': True, 'cleanup': False, 'send_to_testrail': True, - 'fail_on_failed_scenario': True} - int_options = {'setup_retries': 1, 'scenario_retries': 1} - str_options = {'version': 'fargo', 'config_manager': 'arakoon'} - - for key, value in optional_params: - if key in bool_options.keys() and isinstance(value, bool): - bool_options[key] = value - elif key in int_options.keys() and isinstance(value, int): - int_options[key] = value - elif key in 
str_options.keys() and isinstance(value, str): - str_options[key] = value - - ci = { - "setup": bool_options['setup'], # Required - "validation": bool_options['validation'], # Not even used - "scenarios": bool_options['scenarios'], # Optional - "cleanup": bool_options['cleanup'], # optional - "send_to_testrail": bool_options['send_to_testrail'], # Optional - "fail_on_failed_scenario": bool_options['fail_on_failed_scenario'], # Optional - "setup_retries": int_options['setup_retries'], # Optional - "scenario_retries": int_options['scenario_retries'], # Optional - "version": str_options['version'], - "local_hypervisor": { # enkel voor testen, niet voor setup - "type": "KVM", - "user": "root", - "password": "rooter" - }, - - "hypervisor": {}, - "config_manager": str_options['config_manager'], - "user": { - "shell": { - "username": "root", - "password": "rooter" - }, - "api": { - "username": "admin", - "password": "admin" - } - }, - "grid_ip": grid_ip - } - self.json['ci'] = ci - - def _add_hypervisor(self, machine_ip, vms=None): - if vms is None: - vms = {'vm_ip': {'name': '_name', 'role': '_role'}} - hypervisor_dict = { - "type": "KVM", - 'ip': machine_ip, - 'user': 'root', - 'password': 'rooter', - 'vms': vms - } - for i in vms.keys(): - self._ips.append(i) - self.json['ci']['hypervisor'] = hypervisor_dict - - def add_storagerouter(self, storagerouter_ip, hostname): - try: - socket.inet_aton(storagerouter_ip) - except TypeError: - raise ValueError('Invalid ip adress provided: {0}'.format(storagerouter_ip)) - if not isinstance(hostname,str): - raise ValueError('Invalid hostname provided: {0} must be a string, not {1}'.format(hostname,type(hostname))) - sr_dict = {} - sr_dict[storagerouter_ip] = {"hostname": hostname, - "domains": {}, - "disks": {}, - "vpools": {} - } - self.json['setup']['storagerouters'].update(sr_dict) - - def _add_disk_to_sr(self, storagerouter_ip, name, roles): - if not storagerouter_ip in self.json['setup']['storagerouters'].keys(): - raise 
ValueError('Storagerouter with IP {0} not found in json'.format(storagerouter_ip)) - d_dict = {} - d_dict[name] = {"roles": roles} - self.json['setup']['storagerouters'][storagerouter_ip]['disks'].update(d_dict) - - def _add_domain_to_sr(self, storagerouter_ip, name, recovery=False): - if not storagerouter_ip in self.json['setup']['storagerouters'].keys(): - raise ValueError('Storagerouter with ip {0} not found in json'.format(storagerouter_ip)) - if not name in self._domains: - raise ValueError('Invalid domain passed: {}'.format(name)) - path = self.json['setup']['storagerouters'][storagerouter_ip]['domains'] - - if recovery is False: - if 'domain_guids' not in path.keys(): - path['domain_guids'] = [name] - else: - path['domain_guids'].append(name) - else: - if 'recovery_domain_guids' not in path.keys(): - path['recovery_domain_guids'] = [name] - else: - path['recovery_domain_guids'].append(name) - - def add_domain(self, domain): - if not isinstance(domain, str): - raise ValueError("domain is no string") - self._domains.append(domain) - self.json['setup']['domains'].append(domain) - - def add_backend(self, name, domains=None, scaling='LOCAL'): - if domains is None: - domains = [] - for domain_name in domains: - if domain_name not in self._domains: - raise ValueError('Invalid domain passed: {}'.format(domain_name)) - - be_dict = {'name': name, - 'domains': {'domain_guids': domains}, - 'scaling': scaling, - 'presets': [], - 'osds': {} - } - self._backends.append(be_dict['name']) - self.json['setup']['backends'].append(be_dict) - - def _add_preset_to_backend(self, backend_name, preset_name, policies, compression='snappy', encryption=None, fragment_size=2097152): - if backend_name not in self._backends: - raise ValueError('Invalid backend passed as argument: {}'.format(backend_name)) - if encryption == None: - encryption = 'none' - preset_dict = { - 'name': preset_name, - 'compression': compression, - 'encryption': encryption, - 'policies': policies, - 
'fragment_size': fragment_size, - } - self._presets.append(preset_dict['name']) - for i in range(len(self.json['setup']['backends'])): - if self.json['setup']['backends'][i]['name'] == backend_name: - self.json['setup']['backends'][i]['presets'].append(preset_dict) - # 'presets'].append(preset_dict) - - def _add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=None, linked_preset=None): - if backend_name not in self._backends: - raise ValueError('Invalid backend passed as argument: {}'.format(backend_name)) - - osd_dict = {} - for i in range(len(self.json['setup']['backends'])): - if self.json['setup']['backends'][i]['name'] == backend_name: - scaling = self.json['setup']['backends'][i]['scaling'] - if scaling == 'LOCAL': - if osds_on_disks is None: - raise ValueError('Osd dictionairy required') - osd_dict = osds_on_disks - elif scaling == 'GLOBAL': - if linked_backend not in self._backends: - raise ValueError("Provided backend {} not in known backends".format(linked_backend)) - if linked_preset not in self._presets: - raise ValueError("Provided preset {} not in known presets".format(linked_preset)) - osd_dict = {linked_backend: linked_preset} - - else: - print ValueError('invalid scaling ({0}) passed'.format(scaling)) - self.json['setup']['backends'][i]['osds'].update(osd_dict) - - def add_vpool(self, storagerouter_ip, vpool_name, backend_name, preset, storage_ip): - if backend_name not in self._backends: - raise ValueError("Provided backend {} not in known backends".format(backend_name)) - if preset not in self._presets: - raise ValueError('Provided preset not in known presets'.format(preset)) - - vpool_dict = {'backend_name': backend_name, - 'preset': preset, - 'storage_ip': storage_ip, - 'proxies':1, - 'fragment_cache': { - 'strategy': {'cache_on_read': False, 'cache_on_write': False}, - 'location': "disk" - }, - 'block_cache': { - 'strategy': {'cache_on_read': False, 'cache_on_write': False}, - 'location': "disk" - }, - 'storagedriver': - 
{ - "sco_size": 4, - "cluster_size": 4, - "volume_write_buffer": 512, - "strategy": "none", - "global_write_buffer": 20, - "global_read_buffer": 0, - "deduplication": "non_dedupe", - "dtl_transport": "tcp", - "dtl_mode": "sync" - } - } - self.json['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict - - def _change_cache(self, storagerouter_ip, vpool, block_cache=True, fragment_cache=True, on_read=True, on_write=True): - try: - vpool = self.json['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool] - if block_cache is True: - vpool['block_cache']['strategy']['cache_on_read'] = on_read - vpool['block_cache']['strategy']['cache_on_write'] = on_write - if fragment_cache is True: - vpool['fragment_cache']['strategy']['cache_on_read'] = on_read - vpool['fragment_cache']['strategy']['cache_on_write'] = on_write - - except KeyError as e: - raise ValueError('Vpool not found with exception {0}'.format(e)) - - @classmethod - def reformat_json(cls, in_path, out_path=None): - if not isinstance(in_path, str) or (out_path is not None and not isinstance(out_path, str)): - raise ValueError('path should be string type, got {}'.format(type(in_path))) - if out_path is None: - out_path = in_path.rstrip('.json') + '_reformatted.json' - - with open(in_path) as json_data: - d = json.load(json_data) - with open(out_path, 'w') as fp: - json.dump(d, fp=fp, indent=4, sort_keys=True) - - @classmethod - def compare_files(cls, file1, file2): - print 'comparing {} and {}'.format(file1, file2) - with open(file1) as fh1, open(file2) as fh2: - lines_file1 = fh1.readlines() - lines_file2 = fh2.readlines() - - d = difflib.Differ() - diff = d.compare(lines_file1, lines_file2) - for i in diff: - if i[0] != ' ': - print i - - - - diff --git a/helpers/setupjsongenerator.py b/helpers/setupjsongenerator.py new file mode 100644 index 0000000..d31b38f --- /dev/null +++ b/helpers/setupjsongenerator.py @@ -0,0 +1,675 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part 
of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. +import os +import json +from ci.autotests import AutoTests +from ovs.dal.hybrids.albabackend import AlbaBackend +from ovs.dal.hybrids.diskpartition import DiskPartition +from ovs.lib.helpers.toolbox import Toolbox + + + +class SetupJsonGenerator(object): + """ + Class to automate construction of a setup.json file. + Attributes: + prop config + def dump_json_to_file + def def update_scenarios + def update_ci + def add_hypervisor + def remove_hypervisor + + def add_domain + def remove_domain + + def add_storagerouter + def remove_storagerouter + def add_disk_to_sr + def remove_disk_from_sr + def add_domain_to_sr + def remove_domain_from_sr + + def add_backend + def remove_backend + def add_preset_to_backend + def remove_preset_to_backend + def add_osd_to_backend + + def add_vpool + def remove_vpool + + def change_cache + + """ + HYPERV_KVM = 'KVM' + VPOOL_COUNTER = 1 + + def __init__(self): + self._json_dict = {} + self._presets = [] + self._domains = [] + self._backends = [] + self._ips = [] + + @property + def config(self): + """ + Property containing the currently modelled config dict + :return: return the currently modelled config dict + :rtype: dict + """ + return self._json_dict + + def dump_json_to_file(self, path): + """ + Write current setup dict to a json file in the provided path. 
+ :param path: path to dump json file to + :type path: str + """ + with open(path, 'w') as fp: + json.dump(self.config, indent=4, sort_keys=True, fp=fp) + + def update_scenarios(self, scenarios=None): + """ + Add scenarios to be scheduled in the setup. + :param scenarios: + :type scenarios: list + """ + if not isinstance(scenarios, list) and scenarios is not None: + raise ValueError('Scenarios should be passed in a list format, not {}'.format(type(scenarios))) + + if scenarios is None: + self.config['scenarios'] = ['ALL'] + else: + for scenario in scenarios: + if isinstance(scenario, str) and scenario not in AutoTests.list_tests(): + raise ValueError('Scenario {0} is not a valid scenario path.'.format(scenario)) + + self.config['scenarios'] = scenarios + + def update_ci(self, passed_required_params, passed_optional_params=None): + """ + Set the ci constants of the setup file accordign to the passed parameters. + :param passed_required_params: obligatory parameters for the setup file + :type passed_required_params: dict + :param passed_optional_params: optional parameters + :type passed_optional_params: dict + + """ + required_params_layout = {'setup': (bool, None, True), + 'grid_ip': (str, Toolbox.regex_ip, True)} + + default_params = {'validation': False, + 'cleanup': False, + 'send_to_testrail': True, + 'fail_on_failed_scenario': True, + 'scenarios': True, + 'version': 'andes', + 'config_manager': 'arakoon'} + + if passed_optional_params is None: + passed_optional_params = {} + + for key, value in default_params.iteritems(): + if key not in passed_optional_params.keys(): + passed_optional_params[key] = value + + optional_params_layout = {'validation': (bool, None, False), + 'cleanup': (bool, None, False), + 'send_to_testrail': (bool, None, False), + 'fail_on_failed_scenario': (bool, None, False), + 'setup_retries': (int, {'min': 1}, False), + 'scenarios': (bool, None, False), + 'scenario_retries': (int, {'min': 1}, False), + 'version': (str, ['andes', 'unstable', 
'fargo', 'develop'], False), + 'config_manager': (str, 'arakoon', False)} + + Toolbox.verify_required_params(required_params=required_params_layout, actual_params=passed_required_params, verify_keys=True) + Toolbox.verify_required_params(required_params=optional_params_layout, actual_params=passed_optional_params, verify_keys=True) + + if os.system('ping -c 1 {}'.format(passed_required_params['grid_ip'])) != 0: + raise ValueError('No response from ip {0}'.format(required_params_layout['grid_ip'])) + + ci = {'setup': passed_required_params['setup'], + 'cleanup': passed_optional_params['cleanup'], + 'send_to_testrail': passed_optional_params['send_to_testrail'], + 'fail_on_failed_scenario': passed_optional_params['fail_on_failed_scenario'], + 'version': passed_optional_params['version'], + 'scenarios': passed_optional_params['scenarios'], + 'local_hypervisor': {'type': SetupJsonGenerator.HYPERV_KVM, + 'user': 'root', + 'password': 'rooter'}, + 'config_manager': passed_optional_params['config_manager'], + 'user': {'shell': {'username': 'root', + 'password': 'rooter'}, + 'api': {'username': 'admin', + 'password': 'admin'}}, + 'grid_ip': passed_required_params['grid_ip']} + self._json_dict['ci'] = ci + + def add_hypervisor(self, hypervisor_ip, hypervisor_type=HYPERV_KVM, username='root', password='rooter', virtual_machines=None): + """ + Add hypervisor information to the model + :param hypervisor_type: + :param hypervisor_ip: ip of the hypervisor itself + :type hypervisor_ip: str + :param virtual_machines: dict containing the virtual machine ip with their name and according role + :type virtual_machines: dict + example: {1.1.1.1: {'name': 'name1','role': 'VOLDRV'}} + :param username: username to be used in the hypervisor setup + :type username: str + :param password: password to be used in the hypervisor setup + :type password: str + + """ + if 'ci' not in self._json_dict: + raise ValueError('CI constants have to be set before adding hypervisors') + 
self._validation_ip(hypervisor_ip) + + if virtual_machines is None: + vm_ip = self.config['ci']['grid_ip'] + suffix = vm_ip.split('.', 1)[-1] + virtual_machines = {vm_ip: {'name': 'vm_{0}'.format(suffix), 'role': 'COMPUTE'}} + + if not isinstance(virtual_machines, dict): + raise ValueError('Dict of virtual machines should contain entries like { ip: { `name`: `role`}}') + for key, value in virtual_machines.iteritems(): + self._validation_ip(key) + + hypervisor_dict = {'type': hypervisor_type, + 'user': username, + 'password': password, + 'vms': virtual_machines} + + self._ips.extend(virtual_machines.keys()) + if 'hypervisors' not in self.config['ci']: + self.config['ci']['hypervisors'] = {} + self.config['ci']['hypervisors'][hypervisor_ip] = hypervisor_dict + + def remove_hypervisor(self, hypervisor_ip): + try: + self.config['ci']['hypervisors'].pop(hypervisor_ip) + except Exception: + pass + + def add_domain(self, domain): + """ + Add available domains to the model. + :param domain: domainname to add + :type domain: str + """ + if not isinstance(domain, str): + raise ValueError('domain is no string') + self._domains.append(domain) + if 'domains' not in self.config.keys(): + self.config['domains'] = [] + self.config['domains'].append(domain) + + def remove_domain(self, domain): + """ + Remove a domain from the model + :param domain: domain to be removed + :type domain: str + """ + try: + self.config['domains'].remove(domain) + except Exception: + pass + + def add_storagerouter(self, storagerouter_ip, hostname): + """ + Add a storagerouter to the model given the provided ip and hostname. 
+ :param storagerouter_ip: ip address of the storage router + :type storagerouter_ip: str + :param hostname: hostname of the storagerouter + :type hostname: str + """ + self._validation_ip(storagerouter_ip) + required_params = {'hostname': (str, None, True)} + Toolbox.verify_required_params(required_params=required_params, actual_params={'hostname': hostname}, verify_keys=True) + if 'storagerouters' in self.config.keys(): + if storagerouter_ip in self.config['storagerouters']: + raise ValueError('Storagerouter with given ip {0} already defined.'.format(storagerouter_ip)) + else: + if 'storagerouters' not in self.config: + self.config['storagerouters'] = {} + self.config['storagerouters'][storagerouter_ip] = {'hostname': hostname} + + def remove_storagerouter(self, storagerouter_ip): + """ + If a storagerouter with the given ip is present in the model, remove it. + :param storagerouter_ip: ip to remove + :type storagerouter_ip: str + """ + try: + self.config['storagerouters'].pop(storagerouter_ip) + except Exception: + pass + + def add_disk_to_sr(self, storagerouter_ip, name, roles): + """ + Add disk with given name and roles to a storagerouter in the model. 
+ :param storagerouter_ip: + :type storagerouter_ip: str + :param name: name of the disk + :type name: str + :param roles: roles to assign to the disk + :type roles: list + """ + self._valid_storagerouter(storagerouter_ip) + required_params = {'name': (str, None, True), 'roles': (list, None, True)} + Toolbox.verify_required_params(required_params=required_params, actual_params={'name': name, 'roles': roles}, verify_keys=True) + for role in roles: + if role not in DiskPartition.ROLES: + raise ValueError('Provided role {0} is not an allowed role for disk {1}.'.format(role, name)) + disk_dict = {name: {'roles': roles}} + if 'disks' not in self.config['storagerouters'][storagerouter_ip]: + self.config['storagerouters'][storagerouter_ip]['disks'] = {} + self.config['storagerouters'][storagerouter_ip]['disks'].update(disk_dict) + + def remove_disk_from_sr(self, storagerouter_ip, name): + """ + Remove given disk from the specified storagerouter + :param storagerouter_ip: storagerouter to remove disk from + :type storagerouter_ip: str + :param name: name of the disk to be removed + :type name: str + """ + try: + self.config['storagerouters'][storagerouter_ip]['disks'].pop(name) + except Exception: + pass + + def add_domain_to_sr(self, storagerouter_ip, name, recovery=False): + """ + Add domains, present in the model, to a storage router. 
+ :param storagerouter_ip: ip of the storage router + :type storagerouter_ip: str + :param name: name of the domain to add to the storagerouter + :type name: str + :param recovery: true or false whether the domain is a recovery domain or not + :type recovery: bool + """ + self._valid_storagerouter(storagerouter_ip) + Toolbox.verify_required_params(required_params={'name': (str, None, True)}, actual_params={'name': name}, verify_keys=True) + + if name not in self._domains: + raise ValueError('Invalid domain passed: {0}'.format(name)) + + path = self.config['storagerouters'][storagerouter_ip] + if 'domains' not in path.keys(): + path['domains'] = {} + path = path['domains'] + config_key = 'domain_guids' if recovery is False else 'recovery_domain_guids' + if config_key not in path: + path[config_key] = [] + path[config_key].append(name) + + def remove_domain_from_sr(self, storagerouter_ip, name): + """ + Remove the given domain from the storagerouter + :param storagerouter_ip: storagerouter to remove the domains from + :type storagerouter_ip: str + :param name: name of the domain to remove + :type name: str + """ + try: + _ = self.config['storagerouters'][storagerouter_ip]['domains']['domain_guids'] + _.remove(name) + except Exception: + pass + + def add_backend(self, backend_name, domains=None, scaling='LOCAL'): + """ + Add a backend with provided domains and scaling to the model. 
+ :param backend_name: name of the backend + :type backend_name: str + :param domains: domains the backend is linked to + :type domains: {} + :param scaling: + :type scaling: str + """ + if domains is None: + domains = [] + else: + for domain_name in domains: + if domain_name not in self._domains: + raise ValueError('Invalid domain passed: {0}'.format(domain_name)) + + Toolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), + 'domains': (list, self._domains, True), + 'scaling': (str, AlbaBackend.SCALINGS, True)}, + actual_params={'backend_name': backend_name, + 'domains': domains, + 'scaling': scaling}, verify_keys=True) + + be_dict = {'name': backend_name, + 'domains': {'domain_guids': domains}, + 'scaling': scaling} + + self._backends.append(be_dict['name']) + if 'backends' not in self.config: + self.config['backends'] = [] + self.config['backends'].append(be_dict) + + def remove_backend(self, backend_name): + """ + Remove backend with given name from model + :param backend_name: name of the backend to remove + :type backend_name: str + """ + for backend in self.config['backends']: + if backend['name'] == backend_name: + self.config['backends'].pop(self.config['backends'].index(backend)) + + def add_preset_to_backend(self, backend_name, preset_name, policies, compression='snappy', encryption='none', fragment_size=2097152): + """ + Add a preset with provided parameters to given backend. 
+ :param backend_name: name of the backend to which the preset should be added + :type backend_name: str + :param preset_name: name of the preset that should be added + :type preset_name: str + :param policies: nested list of policies + :type policies: list + :param compression: compression level + :type compression: str + :param encryption: encryption level + :type encryption: str + :param fragment_size: + :type fragment_size: int + """ + if backend_name not in self._backends: + raise ValueError('Invalid backend passed as argument: {0}'.format(backend_name)) + + self._check_policies(policies) + + compression_options = ['snappy', 'bz2', 'none'] + if compression not in compression_options: + raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('", "'.join(compression_options))) + + encryption_options = ['aes-cbc-256', 'aes-ctr-256', 'none'] + if encryption not in encryption_options: + raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('", "'.join(encryption_options))) + + if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3): + raise ValueError('Fragment size should be a positive integer smaller than 1 GiB') + + Toolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), + 'preset_name': (str, Toolbox.regex_preset, True), + 'policies': (list, None, True), + 'fragment_size': (int, None, False)}, + actual_params={'backend_name': backend_name, + 'preset_name': preset_name, + 'policies': policies, + 'fragment_size': fragment_size}, + verify_keys=True) + + if encryption is None: + encryption = 'none' + preset_dict = { + 'name': preset_name, + 'compression': compression, + 'encryption': encryption, + 'policies': policies, + 'fragment_size': fragment_size, + } + self._presets.append(preset_dict['name']) + for i in range(len(self.config['backends'])): + if self.config['backends'][i]['name'] == 
backend_name: + if 'presets' not in self.config['backends'][i]: + self.config['backends'][i]['presets'] = [] + self.config['backends'][i]['presets'].append(preset_dict) + + def remove_preset_from_backend(self, backend_name, preset_name): + """ + Remove the preset from given backend + :param backend_name: name of the backend in which to search + :type backend_name: str + :param preset_name: preset name to remove + :type preset_name: str + """ + try: + for i in range(len(self.config['backends'])): + if self.config['backends'][i]['name'] == backend_name: + if 'presets' in self.config['backends'][i]: + for j in range(len(self.config['backends'][i]['presets'])): + if self.config['backends'][i]['presets'][j]['name'] == preset_name: + self.config['backends'][i]['presets'].pop(j) + self.config['backends'].remove(i) + except Exception: + pass + + def add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=None, linked_preset=None): + """ + Add an osd to given backend. + :param backend_name: + :type backend_name: str + :param osds_on_disks: + :type osds_on_disks: dict + example: {'1.1.1.1': {'disk1': 2, 'disk2': 2} + :param linked_backend: + :type linked_backend: str + :param linked_preset: + :type linked_preset: str + """ + if osds_on_disks is None: + osds_on_disks = {} + if backend_name not in self._backends: + raise ValueError('Invalid backend passed as argument: {0}'.format(backend_name)) + required_params = {'backend_name': (str, None, True), + 'osds_on_disk': (dict, None, False), + 'linked_backend': (str, Toolbox.regex_backend, False), + 'linked_preset': (str, Toolbox.regex_preset, False)} + actual_params = {'backend_name': backend_name, + 'osds_on_disk': osds_on_disks, + 'linked_backend': linked_backend, + 'linked_preset': linked_preset} + Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + + osd_dict = {} + for i in range(len(self.config['backends'])): + if 
self.config['backends'][i]['name'] == backend_name: + scaling = self.config['backends'][i]['scaling'] + if scaling == 'LOCAL': + if osds_on_disks is None: + raise ValueError('Osd dictionary required') + osd_dict = osds_on_disks + elif scaling == 'GLOBAL': + if linked_backend not in self._backends: + raise ValueError('Provided backend {0} not in known backends'.format(linked_backend)) + if linked_preset not in self._presets: + raise ValueError('Provided preset {0} not in known presets'.format(linked_preset)) + osd_dict = {linked_backend: linked_preset} + + else: + print ValueError('invalid scaling ({0}) passed'.format(scaling)) + if 'osds' not in self.config['backends'][i]: + self.config['backends'][i]['osds'] = {} + self.config['backends'][i]['osds'].update(osd_dict) + + def remove_osd_from_backend(self, osd_identifier, backend_name): + """ + Remove the osd from given backend + :param backend_name: name of the backend in which to search + :type backend_name: str + :param osd_identifier: osd name to remove + :type osd_identifier: str + """ + try: + for i in range(len(self.config['backends'])): + if self.config['backends'][i]['name'] == backend_name: + self.config['backends'][i]['osds'].pop(osd_identifier) + except Exception: + pass + + def add_vpool(self, storagerouter_ip, backend_name, preset_name, storage_ip, vpool_name=None): + """ + Add a vpool to given storagerouter + :param storagerouter_ip + :type storagerouter_ip: str + :param vpool_name: name of the vpool to add + :type vpool_name: str + :param backend_name: name of the backend to link to the vpool + :type backend_name: str + :param preset_name: name of the preste to link to the vpool + :type preset_name: str + :param storage_ip: + :type storage_ip: str + """ + + if vpool_name is None: + vpool_name = 'myvpool{0}'.format(self.VPOOL_COUNTER) + SetupJsonGenerator.VPOOL_COUNTER += 1 + + required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True), + 'vpool_name': (str, None, False), + 'backend_name': 
(str, None, True), + 'preset_name': (str, None, True), + 'storage_ip': (str, Toolbox.regex_ip, True)} + + actual_params = {'storagerouter_ip': storagerouter_ip, + 'vpool_name': vpool_name, + 'backend_name': backend_name, + 'preset_name': preset_name, + 'storage_ip': storage_ip} + + Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + self._valid_storagerouter(storagerouter_ip=storagerouter_ip) + self._validation_ip(ip=storage_ip) + if backend_name not in self._backends: + raise ValueError('Provided backend {0} not in known backends'.format(backend_name)) + if preset_name not in self._presets: + raise ValueError('Provided preset not in known presets'.format(preset_name)) + vpool_dict = {'backend_name': backend_name, + 'preset': preset_name, + 'storage_ip': storage_ip, + 'proxies': 1, + 'fragment_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False}, + 'location': 'disk' + }, + 'block_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False}, + 'location': 'disk' + }, + 'storagedriver': {'sco_size': 4, + 'cluster_size': 4, + 'volume_write_buffer': 512, + 'strategy': 'none', + 'global_write_buffer': 20, + 'global_read_buffer': 0, + 'deduplication': 'non_dedupe', + 'dtl_transport': 'tcp', + 'dtl_mode': 'sync' + } + } + if 'vpools' not in self.config['storagerouters'][storagerouter_ip]: + self.config['storagerouters'][storagerouter_ip]['vpools'] = {} + self.config['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict + + def remove_vpool(self, storagerouter_ip, vpool_name): + """ + Try to remove a vpool on storagerouter with given ip + :param storagerouter_ip: search for vpool on given storagerouter + :type storagerouter_ip: str + :param vpool_name: remove vpool with this name + :type vpool_name: str + """ + try: + self.config['storagerouters'][storagerouter_ip]['vpools'].pop(vpool_name) + except Exception: + pass + + def change_cache(self, storagerouter_ip, vpool, 
block_cache=True, fragment_cache=True, on_read=True, on_write=True): + """ + Change the caching parameters of a given vpool on a given storagerouter. By default, change parameters of both block chache and fragment cache. + :param storagerouter_ip: search for vpool on this storagerouter + :type storagerouter_ip: str + :param vpool: change cache options of given vpool + :type vpool: str + :param block_cache: change block cache parameters, default True + :type block_cache: bool + :param fragment_cache: change fragment cache parameters, default True + :type fragment_cache: bool + :param on_read: change onread parameters, default True + :type on_read: bool + :param on_write: chance onwrite parameters, default True + :type on_write: bool + """ + self._valid_storagerouter(storagerouter_ip=storagerouter_ip) + + required_params = {'vpool': (str, None, True), + 'block_cache': (bool, None, False), + 'fragment_cache': (bool, None, False), + 'on_read': (bool, None, False), + 'on_write': (bool, None, False)} + actual_params = {'vpool': vpool, + 'block_cache': block_cache, + 'fragment_cache': fragment_cache, + 'on_read': on_read, + 'on_write': on_write} + Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + try: + vpool = self.config['storagerouters'][storagerouter_ip]['vpools'][vpool] + except KeyError: + raise ValueError('Vpool {0} not found'.format(vpool)) + if block_cache is True: + vpool['block_cache']['strategy']['cache_on_read'] = on_read + vpool['block_cache']['strategy']['cache_on_write'] = on_write + if fragment_cache is True: + vpool['fragment_cache']['strategy']['cache_on_read'] = on_read + vpool['fragment_cache']['strategy']['cache_on_write'] = on_write + + def _valid_storagerouter(self, storagerouter_ip): + self._validation_ip(storagerouter_ip) + if storagerouter_ip not in self.config['storagerouters']: + raise ValueError('Storagerouter with ip {0} not found in json'.format(storagerouter_ip)) + + def 
_validation_ip(self, ip): + required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True)} + try: + Toolbox.verify_required_params(required_params=required_params, actual_params={'storagerouter_ip': ip}, verify_keys=True) + except RuntimeError as e: + raise ValueError(e) + if os.system('ping -c 1 {0}'.format(ip)) != 0: + raise ValueError('No response from ip {0}'.format(ip)) + + def _check_policies(self, policies): + class _Policy(object): + def __init__(self, policy): + if not isinstance(policy, list) or len(policy) != 4: + raise ValueError('Policy {0} must be of type list with length = 4'.format(policy)) + self.k, self.c, self.m, self.x = policy + if all(isinstance(entry, int) for entry in policy) is False: + raise ValueError('All policy entries should be integers') + + def get_policy_as_dict(self): + return {'k': self.k, 'c': self.c, 'm': self.m, 'x': self.x} + + def get_policy_as_list(self): + return [self.k, self.c, self.x, self.m] + + def check_policy(self): + if self.k > self.c: + raise ValueError('Invalid policy: k({0}) < c({1}) is required'.format(self.k, self.c)) + if self.c > self.k + self.m: + raise ValueError('Invalid policy: c({0}) < k + m ({1} + {2}) is required'.format(self.c, self.k, self.m)) + clone = self.get_policy_as_dict() + clone.pop('m') + if 0 in clone.values(): + raise ValueError('Policy: {0}: {1} cannot be equal to zero'.format(self.get_policy_as_list(), ''.join([i[0] for i in clone.items() if i[1] == 0]))) + + for i in policies: + _Policy(i).check_policy() diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py index 3278d0d..22503d4 100644 --- a/helpers/storagerouter.py +++ b/helpers/storagerouter.py @@ -14,10 +14,10 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants -from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.dal.hybrids.storagerouter import StorageRouter +from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants class StoragerouterHelper(CIConstants): @@ -64,9 +64,9 @@ def get_storagerouter_ip(storagerouter_guid): return StorageRouter(storagerouter_guid).ip @staticmethod - def get_disk_by_guid(guid, diskname): + def get_disk_by_name(guid, diskname): """ - Fetch a disk by its ip and name + Fetch a disk by its guid and name :param guid: guid of a storagerouter :type guid: str diff --git a/helpers/tests/json_generator_testcase.py b/helpers/tests/json_generator_testcase.py deleted file mode 100644 index 71f17d0..0000000 --- a/helpers/tests/json_generator_testcase.py +++ /dev/null @@ -1,313 +0,0 @@ -import unittest -from ci.api_lib.helpers.setup_json_generator import Setup_json_generator -import json -import difflib -import pprint - -class Json_generator_testcase(unittest.TestCase): - - def __init__(self,*args, **kwargs): - super(Json_generator_testcase, self).__init__(*args, **kwargs) - self.generator = Setup_json_generator() - self.ip = '10.100.100.100' - - def test_structure(self): - self.assertEquals(len((self.generator).get_dict().keys()), 3) - - def test_model_ci(self): - self.generator.model_ci(grid_ip=self.ip) - self.assertTrue(isinstance(self.generator.get_dict()['ci']['setup'],bool)) - - def test_model_scenarios(self): - self.generator.model_scenarios() - self.assertEquals(self.generator.get_dict()['scenarios'],['ALL']) - self.generator.model_scenarios(['ABC','def']) - self.assertEquals(self.generator.get_dict()['scenarios'],['ABC', 'def']) - - def test_add_domain(self): - self.generator.add_domain('domain1') - self.generator.add_domain('domain2') - self.assertEquals(len(self.generator.get_dict()['setup']['domains']), 2) - with 
self.assertRaises(ValueError): - self.generator.add_domain(7) - - def test_add_storagerouter(self): - self.generator.add_domain('domain1') - self.generator.add_domain('domain2') - with self.assertRaises(ValueError): - self.generator.add_storagerouter(storagerouter_ip=100, hostname='hostname') - with self.assertRaises(ValueError): - self.generator.add_storagerouter(storagerouter_ip=self.ip, hostname=7) - self.generator.add_storagerouter(storagerouter_ip=self.ip,hostname='hostname') - self.assertTrue(self.ip in self.generator.get_dict()['setup']['storagerouters'].keys()) - - self.generator._add_disk_to_sr(storagerouter_ip=self.ip,name='disk1',roles=['role1','role2']) - self.assertTrue('disk1' in self.generator.get_dict()['setup']['storagerouters'][self.ip]['disks']) - self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['disks']['disk1']['roles']), 2) - - self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain1') - self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain1',recovery=True) - self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['domain_guids']), 1) - self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['recovery_domain_guids']), 1) - - self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain2') - self.generator._add_domain_to_sr(storagerouter_ip=self.ip, name='domain2',recovery=True) - self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['domain_guids']), 2) - self.assertEquals(len(self.generator.get_dict()['setup']['storagerouters'][self.ip]['domains']['recovery_domain_guids']), 2) - - def test_add_backend(self): - self.generator.add_domain('domain1') - self.generator.add_domain('domain2') - - self.generator.add_backend(name='mybackend', domains=['domain1']) - 
self.assertItemsEqual(self.generator.get_dict()['setup']['backends'][0].keys(),['name','domains','scaling','presets','osds']) - self.generator.add_backend(name='mybackend_02', domains=['domain1'],scaling='GLOBAL') - self.assertItemsEqual(self.generator.get_dict()['setup']['backends'][1].keys(),['name','domains','scaling','presets','osds']) - - self.generator._add_preset_to_backend(backend_name='mybackend_02',preset_name='mypreset',policies=[1,2,2,1]) - self.assertEqual(self.generator.get_dict()['setup']['backends'][1]['name'],'mybackend_02') - with self.assertRaises(ValueError): - self.generator._add_preset_to_backend(backend_name='non-existing_backend',preset_name='mypreset',policies=[1,2,2,1]) - - self.generator._add_osd_to_backend(backend_name='mybackend',osds_on_disks={self.ip: {'vdb': 2}}) - self.assertEqual(self.generator.get_dict()['setup']['backends'][0]['osds'][self.ip]['vdb'],2) - with self.assertRaises(ValueError): - self.generator._add_osd_to_backend(backend_name='mybackend_02',osds_on_disks={self.ip: {'vdb': 2}}) - self.generator._add_osd_to_backend(backend_name='mybackend_02',linked_backend='mybackend',linked_preset='mypreset') - self.assertEqual(self.generator.get_dict()['setup']['backends'][1]['osds']['mybackend'],'mypreset') - - def test_add_vpool(self): - vpoolname = 'vpool01' - self.generator.add_domain('domain1') - self.generator.add_storagerouter(storagerouter_ip=self.ip, hostname='hostname') - self.generator.add_backend(name='mybackend', domains=['domain1']) - self.generator._add_preset_to_backend(backend_name='mybackend',preset_name='mypreset',policies=[1,2,2,1]) - with self.assertRaises(ValueError): - self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='non-existing_backend',preset='mypreset',storage_ip=self.ip) - with self.assertRaises(ValueError): - self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='mybackend',preset='non-existing_preset',storage_ip=self.ip) - - 
self.generator.add_vpool(storagerouter_ip=self.ip, vpool_name=vpoolname, backend_name='mybackend',preset='mypreset',storage_ip=self.ip) - self.assertTrue(vpoolname in self.generator.get_dict()['setup']['storagerouters'][self.ip]['vpools'].keys()) - self.assertTrue('storagedriver' in self.generator.get_dict()['setup']['storagerouters'][self.ip]['vpools'][vpoolname].keys()) - - def test_full_flow(self): - self.generator.model_ci('10.100.199.171') - self.generator._add_hypervisor(machine_ip='10.100.69.222', vms = {'10.100.199.171': {'name': 'ubuntu16.04-ovsnode01-setup1', - 'role': 'COMPUTE'}, - '10.100.199.172': {'name': 'ubuntu16.04-ovsnode02-setup1', - 'role': 'VOLDRV'}, - '10.100.199.173': {'name': 'ubuntu16.04-ovsnode03-setup1', - 'role': 'VOLDRV'} - }) - - self.generator.model_scenarios() - self.generator.add_domain('Roubaix') - self.generator.add_domain('Gravelines') - self.generator.add_domain('Strasbourg') - - #### add backends #### - - self.generator.add_backend(name='mybackend', domains=['Roubaix']) - self.generator._add_osd_to_backend(backend_name='mybackend',osds_on_disks={'10.100.199.171': {'sde': 2,'sdf': 2}, - '10.100.199.172': {'sde': 2,'sdf': 2}, - '10.100.199.173': {'sde': 2, 'sdf': 2}}) - self.generator._add_preset_to_backend(backend_name='mybackend',preset_name='mypreset',policies=[[1,2,2,1]]) - - self.generator.add_backend(name='mybackend02',domains=['Gravelines']) - self.generator._add_preset_to_backend(backend_name='mybackend02',preset_name='mypreset',policies=[[1,2,2,1]]) - self.generator._add_osd_to_backend(backend_name='mybackend02',osds_on_disks={'10.100.199.171': {'sdg': 2}, - '10.100.199.172': {'sdg': 2}, - '10.100.199.173': {'sdg': 2}}) - - self.generator.add_backend(name='mybackend-global',domains=['Roubaix','Gravelines','Strasbourg'],scaling='GLOBAL') - self.generator._add_preset_to_backend(backend_name='mybackend-global',preset_name='mypreset',policies=[[1,2,2,1]]) - 
self.generator._add_osd_to_backend(backend_name='mybackend-global',linked_backend='mybackend',linked_preset='mypreset') - self.generator._add_osd_to_backend(backend_name='mybackend-global',linked_backend='mybackend02',linked_preset='mypreset') - - #### add storagerouter 1 - - self.generator.add_storagerouter(storagerouter_ip='10.100.199.171', hostname='ovs-node-1-1604') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Roubaix') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Gravelines',recovery=True) - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.171',name='Strasbourg',recovery=True) - - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sda',roles=['WRITE','DTL']) - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sdb',roles=['DB']) - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.171',name='sdc',roles=['SCRUB']) - - self.generator.add_vpool(storagerouter_ip='10.100.199.171', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.171') - self.generator._change_cache(storagerouter_ip='10.100.199.171',vpool='myvpool01',block_cache=True,fragment_cache=False,on_write=False) - self.generator._change_cache(storagerouter_ip='10.100.199.171',vpool='myvpool01',fragment_cache=True,block_cache=False,on_read=False,on_write=True) - - - #### add storagerouter2 - - self.generator.add_storagerouter(storagerouter_ip='10.100.199.172', hostname='ovs-node-2-1604') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Gravelines') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Roubaix',recovery=True) - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.172',name='Strasbourg',recovery=True) - - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sda',roles=['WRITE','DTL']) - 
self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sdb',roles=['DB']) - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.172',name='sdc',roles=['SCRUB']) - - self.generator.add_vpool(storagerouter_ip='10.100.199.172', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.172') - self.generator._change_cache(storagerouter_ip='10.100.199.172',vpool='myvpool01',fragment_cache=True,block_cache=True,on_write=False, on_read=True) - - - - #### add storagerouter 3 - - self.generator.add_storagerouter(storagerouter_ip='10.100.199.173', hostname='ovs-node-3-1604') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Gravelines') - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Roubaix',recovery=True) - self.generator._add_domain_to_sr(storagerouter_ip='10.100.199.173',name='Strasbourg',recovery=True) - - - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sda',roles=['WRITE','DTL']) - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sdb',roles=['DB']) - self.generator._add_disk_to_sr(storagerouter_ip='10.100.199.173',name='sdc',roles=['SCRUB']) - self.generator.add_vpool(storagerouter_ip='10.100.199.173', vpool_name='myvpool01', backend_name='mybackend-global',preset='mypreset',storage_ip='10.100.199.173') - - - expected_output = {u'ci': {u'cleanup': False, - u'config_manager': u'arakoon', - u'fail_on_failed_scenario': True, - u'grid_ip': u'10.100.199.171', - u'hypervisor': {u'ip': u'10.100.69.222', - u'password': u'rooter', - u'type': u'KVM', - u'user': u'root', - u'vms': {u'10.100.199.171': {u'name': u'ubuntu16.04-ovsnode01-setup1', - u'role': u'COMPUTE'}, - u'10.100.199.172': {u'name': u'ubuntu16.04-ovsnode02-setup1', - u'role': u'VOLDRV'}, - u'10.100.199.173': {u'name': u'ubuntu16.04-ovsnode03-setup1', - u'role': u'VOLDRV'}} - }, - u'local_hypervisor': {u'password': u'rooter', - u'type': u'KVM', - u'user': 
u'root'}, - u'scenario_retries': 1, - u'scenarios': True, - u'send_to_testrail': True, - u'setup': True, - u'setup_retries': 1, - u'user': {u'api': {u'password': u'admin', u'username': u'admin'}, - u'shell': {u'password': u'rooter', u'username': u'root'}}, - u'validation': True, - u'version': u'fargo'}, - u'scenarios': [u'ALL'], - u'setup': {u'backends': [{u'domains': {u'domain_guids': [u'Roubaix']}, - u'name': u'mybackend', - u'osds': {u'10.100.199.171': {u'sde': 2, u'sdf': 2}, - u'10.100.199.172': {u'sde': 2, u'sdf': 2}, - u'10.100.199.173': {u'sde': 2, u'sdf': 2}}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'LOCAL'}, - {u'domains': {u'domain_guids': [u'Gravelines']}, - u'name': u'mybackend02', - u'osds': {u'10.100.199.171': {u'sdg': 2}, - u'10.100.199.172': {u'sdg': 2}, - u'10.100.199.173': {u'sdg': 2}}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'LOCAL'}, - {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']}, - u'name': u'mybackend-global', - u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'GLOBAL'}], - u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'], - u'storagerouters': {u'10.100.199.171': {u'disks': {u'sda': {u'roles': [u'WRITE', - u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Roubaix'], - u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']}, - u'hostname': u'ovs-node-1-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, 
u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': True}}, - u'preset': u'mypreset', - u'proxies': 1, - u'storage_ip': u'10.100.199.171', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}}, - u'10.100.199.172': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Gravelines'], - u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, - u'hostname': u'ovs-node-2-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, - u'preset': u'mypreset', - u'proxies': 1, - u'storage_ip': u'10.100.199.172', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}}, - u'10.100.199.173': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Gravelines'], - u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, - u'hostname': u'ovs-node-3-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, - u'preset': u'mypreset', - u'proxies': 
1, - u'storage_ip': u'10.100.199.173', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}} - } - } - } - - - self.assertDictEqual(self.generator.get_dict(),expected_output) - - -if __name__ == '__main__': - unittest.main() diff --git a/helpers/tests/jsongeneratortestcase.py b/helpers/tests/jsongeneratortestcase.py new file mode 100644 index 0000000..075bb42 --- /dev/null +++ b/helpers/tests/jsongeneratortestcase.py @@ -0,0 +1,369 @@ +# Copyright (C) 2016 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. +# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. 
+import unittest +from helpers.setupjsongenerator import SetupJsonGenerator + + +class JsonGeneratorTestcase(unittest.TestCase): + def __init__(self, *args, **kwargs): + super(JsonGeneratorTestcase, self).__init__(*args, **kwargs) + self.generator = SetupJsonGenerator() + self.ip_1 = '127.0.0.1' + self.ip_2 = '127.0.0.2' + self.ip_3 = '127.0.0.3' + self.ip_4 = '127.0.0.4' + + def test_structure(self): + self.assertEquals(len(self.generator.config.keys()), 0) + self.assertTrue(isinstance(self.generator.config, dict)) + + def test_model_ci(self): + self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.assertTrue(isinstance(self.generator.config['ci']['setup'], bool)) + self.assertEquals(len(self.generator.config['ci']), 10) + + def test_add_hypervisor(self): + self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + with self.assertRaises(ValueError): + self.generator.add_hypervisor(hypervisor_ip='5') + self.generator.add_hypervisor(hypervisor_ip=self.ip_1) + self.assertEquals(len(self.generator.config['ci']), 11) + + def test_remove_hypervisor(self): + self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.add_hypervisor(hypervisor_ip=self.ip_1) + self.generator.add_hypervisor(hypervisor_ip=self.ip_2) + self.generator.remove_hypervisor(hypervisor_ip=self.ip_2) + self.assertEquals(len(self.generator.config['ci']['hypervisors']), 1) + + def test_model_scenarios(self): + self.generator.update_scenarios() + self.assertEquals(self.generator.config['scenarios'], ['ALL']) + with self.assertRaises(ValueError): + self.generator.update_scenarios(['ABC', 'def']) + + def test_add_domain(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + self.assertEquals(len(self.generator.config['domains']), 2) + with self.assertRaises(ValueError): + self.generator.add_domain(7) + + def test_remove_domain(self): + 
self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + self.generator.remove_domain('domain1') + self.assertEquals(len(self.generator.config['domains']), 1) + + def test_storagerouter_addition_removal(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + with self.assertRaises(ValueError): + self.generator.add_storagerouter(storagerouter_ip='100', hostname='hostname') + self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname') + self.assertTrue(self.ip_1 in self.generator.config['storagerouters'].keys()) + self.generator.add_storagerouter(storagerouter_ip=self.ip_2, hostname='hostname') + self.generator.remove_storagerouter(storagerouter_ip=self.ip_2) + self.assertFalse(self.ip_2 in self.generator.config['storagerouters'].keys()) + + with self.assertRaises(ValueError): + self.generator.add_disk_to_sr(storagerouter_ip='5', name='disk1', roles=['SCRUB', 'DTL']) + with self.assertRaises(ValueError): + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['bla']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['SCRUB', 'DTL']) + self.assertTrue('disk1' in self.generator.config['storagerouters'][self.ip_1]['disks']) + self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['disks']['disk1']['roles']), 2) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk2', roles=['DB']) + self.generator.remove_disk_from_sr(storagerouter_ip=self.ip_1, name='disk2') + self.assertFalse('disk2' in self.generator.config['storagerouters'][self.ip_1]) + + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1', recovery=True) + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2') + self.generator.remove_domain_from_sr(storagerouter_ip=self.ip_1, name='domain2') + self.assertFalse('domain2' in 
self.generator.config['storagerouters'][self.ip_1]) + + self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['domain_guids']), 1) + self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 1) + + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2', recovery=True) + self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['domain_guids']), 2) + self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 2) + + def test_backend_addition_removal(self): + self.generator.add_domain('domain1') + self.generator.add_domain('domain2') + + self.generator.add_backend(backend_name='mybackend', domains=['domain1']) + self.assertItemsEqual(self.generator.config['backends'][0].keys(), ['name', 'domains', 'scaling']) + self.generator.add_backend(backend_name='mybackend02', domains=['domain1'], scaling='GLOBAL') + self.assertItemsEqual(self.generator.config['backends'][1].keys(), ['name', 'domains', 'scaling']) + + self.generator.add_preset_to_backend(backend_name='mybackend02', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + self.assertEqual(self.generator.config['backends'][1]['name'], 'mybackend02') + with self.assertRaises(ValueError): + self.generator.add_preset_to_backend(backend_name='non-existing_backend', preset_name='mypreset', policies=[1, 2, 2, 1]) + + self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_1: {'vdb': 2}}) + self.assertEquals(len(self.generator.config['backends'][0]['osds']), 1) + + self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_2: {'vdb': 2}}) + self.assertEquals(len(self.generator.config['backends'][0]['osds']), 2) + + self.generator.remove_osd_from_backend(backend_name='mybackend', osd_identifier=self.ip_2) + 
self.assertEquals(len(self.generator.config['backends'][0]['osds']), 1) + + self.assertEqual(self.generator.config['backends'][0]['osds'][self.ip_1]['vdb'], 2) + with self.assertRaises(ValueError): + self.generator.add_osd_to_backend(backend_name='mybackend02', osds_on_disks={self.ip_1: {'vdb': 2}}) + self.generator.add_osd_to_backend(backend_name='mybackend02', linked_backend='mybackend', linked_preset='mypreset') + self.assertEqual(self.generator.config['backends'][1]['osds']['mybackend'], 'mypreset') + self.generator.remove_backend('mybackend02') + self.assertNotEquals(len(self.generator.config['backends']), 3) + + def test_vpool_addition_removal(self): + vpoolname = 'vpool01' + self.generator.add_domain('domain1') + self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname') + self.generator.add_backend(backend_name='mybackend', domains=['domain1']) + self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + with self.assertRaises(ValueError): + self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='non-existing_backend', preset_name='mypreset', storage_ip=self.ip_1) + with self.assertRaises(ValueError): + self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='non-existing_preset', storage_ip=self.ip_1) + + self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1) + self.assertTrue(vpoolname in self.generator.config['storagerouters'][self.ip_1]['vpools'].keys()) + self.assertTrue('storagedriver' in self.generator.config['storagerouters'][self.ip_1]['vpools'][vpoolname].keys()) + + self.generator.add_vpool(storagerouter_ip=self.ip_1, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1, vpool_name='vpool1000') + self.generator.remove_vpool(storagerouter_ip=self.ip_1, 
vpool_name='vpool1000') + self.assertFalse('vpool1000' in self.generator.config['storagerouters'][self.ip_1]['vpools']) + + def test_full_flow(self): + self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.add_hypervisor(hypervisor_ip=self.ip_1, + virtual_machines={self.ip_2: {'name': 'ubuntu16.04-ovsnode01-setup1', + 'role': 'COMPUTE'}, + self.ip_3: {'name': 'ubuntu16.04-ovsnode02-setup1', + 'role': 'VOLDRV'}, + self.ip_4: {'name': 'ubuntu16.04-ovsnode03-setup1', + 'role': 'VOLDRV'}}) + + self.generator.update_scenarios() + self.generator.add_domain('Roubaix') + self.generator.add_domain('Gravelines') + self.generator.add_domain('Strasbourg') + + # add backends #### + + self.generator.add_backend(backend_name='mybackend', domains=['Roubaix']) + self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_2: {'sde': 2, 'sdf': 2}, + self.ip_3: {'sde': 2, 'sdf': 2}, + self.ip_4: {'sde': 2, 'sdf': 2}}) + self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + + self.generator.add_backend(backend_name='mybackend02', domains=['Gravelines']) + self.generator.add_preset_to_backend(backend_name='mybackend02', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + self.generator.add_osd_to_backend(backend_name='mybackend02', osds_on_disks={self.ip_2: {'sdg': 2}, + self.ip_3: {'sdg': 2}, + self.ip_4: {'sdg': 2}}) + + self.generator.add_backend(backend_name='mybackend-global', domains=['Roubaix', 'Gravelines', 'Strasbourg'], scaling='GLOBAL') + self.generator.add_preset_to_backend(backend_name='mybackend-global', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + self.generator.add_osd_to_backend(backend_name='mybackend-global', linked_backend='mybackend', linked_preset='mypreset') + self.generator.add_osd_to_backend(backend_name='mybackend-global', linked_backend='mybackend02', linked_preset='mypreset') + + # add storagerouter 1 + + 
self.generator.add_storagerouter(storagerouter_ip=self.ip_2, hostname='ovs-node-1-1604') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Roubaix') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Gravelines', recovery=True) + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Strasbourg', recovery=True) + + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sda', roles=['WRITE', 'DTL']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sdb', roles=['DB']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sdc', roles=['SCRUB']) + + self.generator.add_vpool(storagerouter_ip=self.ip_2, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) + self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', block_cache=True, fragment_cache=False, on_write=False) + self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', fragment_cache=True, block_cache=False, on_read=False, on_write=True) + + # add storagerouter2 + + self.generator.add_storagerouter(storagerouter_ip=self.ip_3, hostname='ovs-node-2-1604') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Gravelines') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Roubaix', recovery=True) + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Strasbourg', recovery=True) + + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sda', roles=['WRITE', 'DTL']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sdb', roles=['DB']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sdc', roles=['SCRUB']) + + self.generator.add_vpool(storagerouter_ip=self.ip_3, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) + self.generator.change_cache(storagerouter_ip=self.ip_3, vpool='myvpool01', fragment_cache=True, 
block_cache=True, on_write=False, on_read=True) + + # add storagerouter 3 + + self.generator.add_storagerouter(storagerouter_ip=self.ip_4, hostname='ovs-node-3-1604') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Gravelines') + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Roubaix', recovery=True) + self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Strasbourg', recovery=True) + + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sda', roles=['WRITE', 'DTL']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdb', roles=['DB']) + self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdc', roles=['SCRUB']) + self.generator.add_vpool(storagerouter_ip=self.ip_4, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) + + expected_output = {u'ci': {u'cleanup': False, + u'config_manager': u'arakoon', + u'fail_on_failed_scenario': True, + u'grid_ip': u'127.0.0.1', + u'hypervisors': {u'127.0.0.1': {u'password': u'rooter', + u'type': u'KVM', + u'user': u'root', + u'vms': {u'127.0.0.2': {u'name': u'ubuntu16.04-ovsnode01-setup1', + u'role': u'COMPUTE'}, + u'127.0.0.3': {u'name': u'ubuntu16.04-ovsnode02-setup1', + u'role': u'VOLDRV'}, + u'127.0.0.4': {u'name': u'ubuntu16.04-ovsnode03-setup1', + u'role': u'VOLDRV'}}}}, + u'local_hypervisor': {u'password': u'rooter', + u'type': u'KVM', + u'user': u'root'}, + u'scenarios': True, + u'send_to_testrail': True, + u'setup': True, + u'user': {u'api': {u'password': u'admin', u'username': u'admin'}, + u'shell': {u'password': u'rooter', u'username': u'root'}}, + u'version': u'andes'}, + u'scenarios': [u'ALL'], + u'backends': [{u'domains': {u'domain_guids': [u'Roubaix']}, + u'name': u'mybackend', + u'osds': {u'127.0.0.2': {u'sde': 2, u'sdf': 2}, + u'127.0.0.3': {u'sde': 2, u'sdf': 2}, + u'127.0.0.4': {u'sde': 2, u'sdf': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + 
u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Gravelines']}, + u'name': u'mybackend02', + u'osds': {u'127.0.0.2': {u'sdg': 2}, + u'127.0.0.3': {u'sdg': 2}, + u'127.0.0.4': {u'sdg': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']}, + u'name': u'mybackend-global', + u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'GLOBAL'}], + + u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'], + u'storagerouters': {u'127.0.0.2': {u'disks': {u'sda': {u'roles': [u'WRITE', + u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Roubaix'], + u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']}, + u'hostname': u'ovs-node-1-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': True}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}, + u'127.0.0.3': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + 
u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-2-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}, + u'127.0.0.4': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-3-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 20, + u'global_read_buffer': 0, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}} + } + } + + self.assertDictEqual(self.generator.config['ci'], expected_output[u'ci']) + self.assertEqual(self.generator.config['domains'], expected_output[u'domains']) + self.assertDictEqual(self.generator.config['storagerouters'], expected_output[u'storagerouters']) + self.assertEqual(self.generator.config['backends'], expected_output[u'backends']) + + 
self.assertDictEqual(self.generator.config, expected_output) + +if __name__ == '__main__': + unittest.main() diff --git a/helpers/vdisk.py b/helpers/vdisk.py index c6fc8be..fa8c1ba 100644 --- a/helpers/vdisk.py +++ b/helpers/vdisk.py @@ -13,11 +13,12 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. -from ci.scenario_helpers.ci_constants import CIConstants + from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.lists.vdisklist import VDiskList from ovs.dal.lists.vpoollist import VPoolList from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.exceptions import VPoolNotFoundError, VDiskNotFoundError diff --git a/remove/backend.py b/remove/backend.py index 0aa4fc2..9b832aa 100644 --- a/remove/backend.py +++ b/remove/backend.py @@ -13,10 +13,10 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. -from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.albanode import AlbaNodeHelper from ..helpers.backend import BackendHelper +from ..helpers.ci_constants import CIConstants from ..validate.decorators import required_backend, required_preset diff --git a/remove/roles.py b/remove/roles.py index 93308b6..61891d2 100644 --- a/remove/roles.py +++ b/remove/roles.py @@ -14,46 +14,54 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants from subprocess import check_output +from ovs.extensions.generic.system import System from ovs.extensions.generic.logger import Logger +from ovs.extensions.generic.sshclient import SSHClient +from ..helpers.ci_constants import CIConstants from ..helpers.fstab import FstabHelper from ..helpers.storagerouter import StoragerouterHelper from ..setup.roles import RoleSetup + class RoleRemover(CIConstants): LOGGER = Logger("remove-ci_role_remover") CONFIGURE_DISK_TIMEOUT = 300 @staticmethod - def _umount(mountpoint): + def _umount(mountpoint, client=None): """ Unmount the given partition :param mountpoint: Location where the mountpoint is mounted :type mountpoint: str :return: """ + if client is None: + client = SSHClient(System.get_my_storagerouter(), username='root') try: - check_output('umount {0}'.format(mountpoint), shell=True) + client.run(['umount', mountpoint]) except Exception: RoleRemover.LOGGER.exception('Unable to umount mountpoint {0}'.format(mountpoint)) raise RuntimeError('Could not unmount {0}'.format(mountpoint)) @staticmethod - def _remove_filesystem(device, alias_part_label): + def _remove_filesystem(device, alias_part_label, client=None): """ :param alias_part_label: eg /dev/disk/by-partlabel/ata-QEMU_HARDDISK_QM00011 :type alias_part_label: str :return: """ + if client is None: + client = SSHClient(System.get_my_storagerouter(), username='root') try: partition_cmd = "udevadm info --name={0} | awk -F '=' '/ID_PART_ENTRY_NUMBER/{{print $NF}}'".format(alias_part_label) - partition_number = check_output(partition_cmd, shell=True) - format_cmd = 'parted {0} rm {1}'.format(device, partition_number) - check_output(format_cmd, shell=True) + partition_number = client.run(partition_cmd, allow_insecure=True) + if partition_number: + format_cmd = 'parted {0} rm {1}'.format(device, partition_number) + client.run(format_cmd.split()) except Exception: RoleRemover.LOGGER.exception('Unable to remove filesystem 
of {0}'.format(alias_part_label)) raise RuntimeError('Could not remove filesystem of {0}'.format(alias_part_label)) @@ -61,16 +69,19 @@ def _remove_filesystem(device, alias_part_label): @classmethod def remove_role(cls, storagerouter_ip, diskname): allowed_roles = ['WRITE', 'DTL', 'SCRUB', 'DB'] - RoleRemover.LOGGER.info("Starting removal of disk roles.") + cls.LOGGER.info("Starting removal of disk roles.") # Fetch information + storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip=storagerouter_ip) - disk = StoragerouterHelper.get_disk_by_guid(guid=storagerouter.guid, diskname=diskname) + disk = StoragerouterHelper.get_disk_by_name(guid=storagerouter.guid, diskname=diskname) # Check if there are any partitions on the disk, if so check if there is enough space + client = SSHClient(storagerouter, username='root') + if len(disk.partitions) > 0: for partition in disk.partitions: # Remove all partitions that have roles if set(partition.roles).issubset(allowed_roles) and len(partition.roles) > 0: - RoleRemover.LOGGER.info("Removing {0} from partition {1} on disk {2}".format(partition.roles, partition.guid, diskname)) + cls.LOGGER.info("Removing {0} from partition {1} on disk {2}".format(partition.roles, partition.guid, diskname)) RoleSetup.configure_disk(storagerouter_guid=storagerouter.guid, disk_guid=disk.guid, offset=partition.offset, @@ -78,19 +89,19 @@ def remove_role(cls, storagerouter_ip, diskname): roles=[], partition_guid=partition.guid) - # Unmount partition - RoleRemover.LOGGER.info("Umounting disk {2}".format(partition.roles, partition.guid, diskname)) - RoleRemover._umount(partition.mountpoint) + + cls._umount(partition.mountpoint, client=client) # Remove from fstab - RoleRemover.LOGGER.info("Removing {0} from fstab".format(partition.mountpoint, partition.guid, diskname)) - FstabHelper().remove_by_mountpoint(partition.mountpoint) + + cls.LOGGER.info("Removing {0} from fstab".format(partition.mountpoint, partition.guid, diskname)) + 
FstabHelper(client=client).remove_by_mountpoint(partition.mountpoint,client) # Remove filesystem - RoleRemover.LOGGER.info("Removing filesystem on partition {0} on disk {1}".format(partition.guid, diskname)) + cls.LOGGER.info("Removing filesystem on partition {0} on disk {1}".format(partition.guid, diskname)) alias = partition.aliases[0] device = '/dev/{0}'.format(diskname) - RoleRemover._remove_filesystem(device, alias) + cls._remove_filesystem(device, alias,client=client) # Remove partition from model - RoleRemover.LOGGER.info("Removing partition {0} on disk {1} from model".format(partition.guid, diskname)) + cls.LOGGER.info("Removing partition {0} on disk {1} from model".format(partition.guid, diskname)) partition.delete() else: print 'Found no roles on partition' diff --git a/remove/vdisk.py b/remove/vdisk.py index de42e1d..d683163 100644 --- a/remove/vdisk.py +++ b/remove/vdisk.py @@ -14,8 +14,8 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. -from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.vdisk import VDiskHelper from ..validate.decorators import required_vtemplate @@ -44,9 +44,9 @@ def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT): continue if len(vdisk.child_vdisks_guids) > 0: for vdisk_child_guid in vdisk.child_vdisks_guids: - VDiskRemover.remove_vdisk(vdisk_child_guid, api) + VDiskRemover.remove_vdisk(vdisk_child_guid) removed_guids.append(vdisk_child_guid) - VDiskRemover.remove_vdisk(vdisk.guid, api, timeout) + VDiskRemover.remove_vdisk(vdisk.guid, timeout) removed_guids.append(vdisk.guid) @classmethod diff --git a/remove/vpool.py b/remove/vpool.py index 2261eac..132bdf5 100644 --- a/remove/vpool.py +++ b/remove/vpool.py @@ -14,8 +14,8 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.storagerouter import StoragerouterHelper from ..helpers.vpool import VPoolHelper diff --git a/setup/backend.py b/setup/backend.py index bd26070..ac7503e 100644 --- a/setup/backend.py +++ b/setup/backend.py @@ -14,10 +14,11 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. import time -from ci.scenario_helpers.ci_constants import CIConstants + from ovs.extensions.generic.logger import Logger from ..helpers.albanode import AlbaNodeHelper from ..helpers.backend import BackendHelper +from ..helpers.ci_constants import CIConstants from ..validate.decorators import required_roles, required_backend, required_preset, check_backend, check_preset, \ check_linked_backend, filter_osds @@ -216,6 +217,7 @@ def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRI 'slot_id': slot_id, 'osd_type': 'ASD', 'alba_backend_guid': alba_backend_guid}) + node_slot_information[alba_node_guid] = slot_information for alba_node_guid, slot_information in node_slot_information.iteritems(): BackendSetup.LOGGER.info('Posting {0} for alba_node_guid {1}'.format(slot_information, alba_node_guid)) @@ -326,12 +328,13 @@ def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_T :return: """ data = {'slot_information': slot_information} - + print '_fill_slots: data: ', data task_guid = cls.api.post( api='/alba/nodes/{0}/fill_slots/'.format(alba_node_guid), data=data ) task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) + print 'fill_slots', task_result if not task_result[0]: error_msg = "Initialize disk `{0}` for alba node `{1}` has failed".format(data, alba_node_guid) BackendSetup.LOGGER.error(error_msg) @@ -388,6 +391,7 @@ def link_backend(cls, albabackend_name, globalbackend_name, preset_name, timeout :type 
timeout: int :return: """ + local_albabackend = BackendHelper.get_albabackend_by_name(albabackend_name) data = { diff --git a/setup/domain.py b/setup/domain.py index 68159b7..059ca7a 100644 --- a/setup/domain.py +++ b/setup/domain.py @@ -14,9 +14,9 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. -from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger from ..helpers.backend import BackendHelper +from ..helpers.ci_constants import CIConstants from ..helpers.domain import DomainHelper from ..helpers.storagerouter import StoragerouterHelper from ..validate.decorators import required_backend @@ -34,11 +34,10 @@ def add_domain(cls, domain_name): """ Add a new (recovery) domain to the cluster - :param domain_name: name of a new domain + :param domain_name: name of a new domain to add :type domain_name: str :return: """ - # check if domain already exists if not DomainHelper.get_domain_by_name(domain_name): data = {"name": domain_name} diff --git a/setup/roles.py b/setup/roles.py index f781638..624fa68 100644 --- a/setup/roles.py +++ b/setup/roles.py @@ -14,8 +14,8 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.storagerouter import StoragerouterHelper from ..validate.decorators import check_role_on_disk @@ -50,7 +50,7 @@ def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION # Fetch information storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid - disk = StoragerouterHelper.get_disk_by_guid(storagerouter_guid, diskname) + disk = StoragerouterHelper.get_disk_by_name(storagerouter_guid, diskname) # Check if there are any partitions on the disk, if so check if there is enough space unused_partitions = [] if len(disk.partitions) > 0: diff --git a/setup/vdisk.py b/setup/vdisk.py index cf4d9d8..4641bd3 100644 --- a/setup/vdisk.py +++ b/setup/vdisk.py @@ -14,8 +14,8 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants from ovs.extensions.generic.logger import Logger +from ..helpers.ci_constants import CIConstants from ..helpers.storagerouter import StoragerouterHelper from ..helpers.vdisk import VDiskHelper from ..helpers.vpool import VPoolHelper @@ -36,7 +36,7 @@ def __init__(self): pass @classmethod - def create_snapshot(snapshot_name, vdisk_name, vpool_name, consistent=True, sticky=True, + def create_snapshot(cls, snapshot_name, vdisk_name, vpool_name, consistent=True, sticky=True, timeout=CREATE_SNAPSHOT_TIMEOUT): """ Create a new snapshot for a vdisk @@ -224,7 +224,7 @@ def create_clone(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, @classmethod @required_vdisk - def set_vdisk_as_template(vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): + def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): """ Create a new vDisk on a certain vPool/storagerouter Set a existing vDisk as vTemplate diff --git a/setup/vpool.py b/setup/vpool.py index f338bd2..e197dbd 100644 --- a/setup/vpool.py +++ b/setup/vpool.py @@ -14,10 +14,10 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ci.scenario_helpers.ci_constants import CIConstants -from ovs.lib.generic import GenericController from ovs.extensions.generic.logger import Logger +from ovs.lib.generic import GenericController from ..helpers.backend import BackendHelper +from ..helpers.ci_constants import CIConstants from ..helpers.storagerouter import StoragerouterHelper from ..validate.decorators import required_roles, check_vpool diff --git a/validate/backend.py b/validate/backend.py index dcc1452..cffccc7 100644 --- a/validate/backend.py +++ b/validate/backend.py @@ -52,7 +52,7 @@ def check_preset_on_backend(preset_name, albabackend_name): @staticmethod def check_policies_on_preset(preset_name, albabackend_name, policies): """ - Check if a preset is available on a backend + Check if given policies match with the specified backend :param preset_name: name of a preset :type preset_name: str @@ -126,16 +126,17 @@ def check_available_osds_on_asdmanager(ip, disks): slot_map[disk_name] = fetched_disk available_disks = {} for disk, amount_asds in disks.iteritems(): + # check if requested disk is present and available in fetched_disks if disk not in slot_map: - BackendValidation.LOGGER.error("Disk `{0}` was NOT found on node `{1}`!".format(ip, disk)) + BackendValidation.LOGGER.error("Disk `{0}` was NOT found on node `{1}`!".format(disk, ip)) continue if slot_map[disk]['available'] is False: - BackendValidation.LOGGER.error("Disk `{0}` is NOT available on node `{1}`!".format(ip, disk)) + BackendValidation.LOGGER.error("Disk `{0}` is NOT available on node `{1}`!".format(disk, ip)) continue # add disk to available disks available_disks[disk] = amount_asds - BackendValidation.LOGGER.info("Disk `{0}` is available on node `{1}`!".format(ip, disk)) + BackendValidation.LOGGER.info("Disk `{0}` is available on node `{1}`!".format(disk, ip)) BackendValidation.LOGGER.info("The following disks are available for use on `{0}`: {1}".format(ip, available_disks)) return available_disks From 
a56c51a0bc691548d4fc07e75b9cef49ad2a9fe1 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 5 Dec 2017 18:01:44 +0100 Subject: [PATCH 13/28] setup + cleanup --- helpers/api.py | 5 +++++ setup/backend.py | 2 -- validate/roles.py | 1 - 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/helpers/api.py b/helpers/api.py index 434b3f4..0ae083c 100644 --- a/helpers/api.py +++ b/helpers/api.py @@ -266,6 +266,11 @@ def wait_for_task(self, task_id, timeout=None): if timeout is not None and timeout < (time.time() - start): raise TimeOutError('Waiting for task {0} has timed out.'.format(task_id)) task_metadata = self.get('/tasks/{0}/'.format(task_id)) + output = 'Task with ID: {0: >40}, current status: {1: >8}, ready: {2: >7}. Result data: {3}'.format(task_metadata['id'], + task_metadata['status'], + task_metadata['successful'], + task_metadata['result']) + OVSClient._logger.debug(output) finished = task_metadata['status'] in ('FAILURE', 'SUCCESS') if finished is False: if task_metadata != previous_metadata: diff --git a/setup/backend.py b/setup/backend.py index ac7503e..e12477c 100644 --- a/setup/backend.py +++ b/setup/backend.py @@ -328,13 +328,11 @@ def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_T :return: """ data = {'slot_information': slot_information} - print '_fill_slots: data: ', data task_guid = cls.api.post( api='/alba/nodes/{0}/fill_slots/'.format(alba_node_guid), data=data ) task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) - print 'fill_slots', task_result if not task_result[0]: error_msg = "Initialize disk `{0}` for alba node `{1}` has failed".format(data, alba_node_guid) BackendSetup.LOGGER.error(error_msg) diff --git a/validate/roles.py b/validate/roles.py index 6f30ee9..e1f8eeb 100644 --- a/validate/roles.py +++ b/validate/roles.py @@ -18,7 +18,6 @@ from ..helpers.storagerouter import StoragerouterHelper - class RoleValidation(object): LOGGER = Logger("validate-ci_role_validate") 
From 4041343784bfa16325da3dc65973aa83e0dac0fc Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Wed, 13 Dec 2017 15:50:22 +0100 Subject: [PATCH 14/28] -fix reviews -fix scenarios -rework hypervisor credentials --- helpers/api.py | 5 +- helpers/backend.py | 4 +- helpers/ci_constants.py | 68 +---- helpers/disk.py | 16 +- helpers/hypervisor/hypervisor.py | 84 ++++-- helpers/setupjsongenerator.py | 338 +++++++++++++------------ helpers/storagerouter.py | 29 +++ helpers/tests/jsongeneratortestcase.py | 314 ++++++++++++----------- helpers/vdisk.py | 2 + setup/arakoon.py | 43 ++++ setup/vdisk.py | 2 +- 11 files changed, 491 insertions(+), 414 deletions(-) diff --git a/helpers/api.py b/helpers/api.py index 0ae083c..a57eb0a 100644 --- a/helpers/api.py +++ b/helpers/api.py @@ -72,7 +72,6 @@ class OVSClient(object): disable_warnings(InsecureRequestWarning) disable_warnings(SNIMissingWarning) - def __init__(self, ip, username, password, verify=False, version='*', port=None, raw_response=False): """ Initializes the object with credentials and connection information @@ -186,7 +185,6 @@ def _process(self, response, overrule_raw=False): else: raise HttpException(status_code, message) - def _call(self, api, params, func, **kwargs): if not api.endswith('/'): api = '{0}/'.format(api) @@ -266,10 +264,11 @@ def wait_for_task(self, task_id, timeout=None): if timeout is not None and timeout < (time.time() - start): raise TimeOutError('Waiting for task {0} has timed out.'.format(task_id)) task_metadata = self.get('/tasks/{0}/'.format(task_id)) - output = 'Task with ID: {0: >40}, current status: {1: >8}, ready: {2: >7}. Result data: {3}'.format(task_metadata['id'], + output = 'Task with ID: {0: >40}, current status: {1: >8}, ready: {2: >2}. 
Result data: {3}'.format(task_metadata['id'], task_metadata['status'], task_metadata['successful'], task_metadata['result']) + print output OVSClient._logger.debug(output) finished = task_metadata['status'] in ('FAILURE', 'SUCCESS') if finished is False: diff --git a/helpers/backend.py b/helpers/backend.py index 6733923..6197de4 100644 --- a/helpers/backend.py +++ b/helpers/backend.py @@ -14,7 +14,6 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. - from ovs.dal.hybrids.albabackend import AlbaBackend from ovs.dal.lists.albabackendlist import AlbaBackendList from ovs.dal.lists.backendlist import BackendList @@ -163,8 +162,7 @@ def get_backend_local_stack(cls, albabackend_name): 'contents': 'local_stack', } return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), - params={'queryparams': options} - ) + params={'queryparams': options}) @staticmethod def get_alba_backends(): diff --git a/helpers/ci_constants.py b/helpers/ci_constants.py index 7b452fd..c435351 100644 --- a/helpers/ci_constants.py +++ b/helpers/ci_constants.py @@ -17,19 +17,15 @@ from ci.api_lib.helpers.api import OVSClient -CONFIG_LOC = "/opt/OpenvStorage/ci/config/setup.json" -TEST_SCENARIO_LOC = "/opt/OpenvStorage/ci/scenarios/" -SETTINGS_LOC = "/opt/OpenvStorage/ci/config/settings.json" -TESTTRAIL_LOC = "/opt/OpenvStorage/ci/config/testtrail.json" - - class CIConstants(object): """ Collection of multiple constants and constant related instances """ - FIO_BIN = {'url': 'http://www.include.gr/fio.bin.latest', 'location': '/tmp/fio.bin.latest'} - FIO_BIN_EE = {'url': 'http://www.include.gr/fio.bin.latest.ee', 'location': '/tmp/fio.bin.latest'} + CONFIG_LOC = "/opt/OpenvStorage/ci/config/setup.json" + TEST_SCENARIO_LOC = "/opt/OpenvStorage/ci/scenarios/" + SETTINGS_LOC = "/opt/OpenvStorage/ci/config/settings.json" + TESTRAIL_LOC = "/opt/OpenvStorage/ci/config/testrail.json" with 
open(CONFIG_LOC, 'r') as JSON_CONFIG: SETUP_CFG = json.load(JSON_CONFIG) @@ -37,43 +33,10 @@ class CIConstants(object): with open(SETTINGS_LOC, 'r') as JSON_SETTINGS: SETTINGS = json.load(JSON_SETTINGS) - DATA_TEST_CASES = [(0, 100), (30, 70), (40, 60), (50, 50), (70, 30), (100, 0)] # read write patterns to test (read, write) - - CLOUD_INIT_DATA = { - 'script_loc': 'https://raw.githubusercontent.com/kinvaris/cloud-init/master/create-config-drive', - 'script_dest': '/tmp/cloud_init_script.sh', - 'user-data_loc': '/tmp/user-data-migrate-test', - 'config_dest': '/tmp/cloud-init-config-migrate-test' - } - - # collect details about parent hypervisors - PARENT_HYPERVISOR_INFO = SETUP_CFG['ci'].get('hypervisors') - - # hypervisor details - HYPERVISOR_TYPE = SETUP_CFG['ci']['local_hypervisor']['type'] - HYPERVISOR_USER = SETUP_CFG['ci']['local_hypervisor']['user'] - HYPERVISOR_PASSWORD = SETUP_CFG['ci']['local_hypervisor']['password'] - - HYPERVISOR_INFO = {'type': HYPERVISOR_TYPE, - 'user': HYPERVISOR_USER, - 'password': HYPERVISOR_PASSWORD} - - VM_USERNAME = 'root' # vm credentials & details - VM_PASSWORD = 'rooter' - VM_VCPUS = 4 - VM_VRAM = 1024 # In MB - VM_OS_TYPE = 'ubuntu16.04' - - VM_WAIT_TIME = 300 # wait time before timing out on the vm install in seconds - - VDISK_THREAD_LIMIT = 5 # Each monitor thread queries x amount of vdisks - FIO_VDISK_LIMIT = 50 # Each fio uses x disks - - IO_REFRESH_RATE = 5 # Refresh rate used for polling IO - AMOUNT_TO_WRITE = 1 * 1024 ** 3 # Amount of data to RW to produce IO - - HA_TIMEOUT = 300 - + HYPERVISOR_INFO = SETUP_CFG['ci'].get('hypervisor') + DOMAIN_INFO = SETUP_CFG['setup']['domains'] + BACKEND_INFO = SETUP_CFG['setup']['backends'] + STORAGEROUTER_INFO = SETUP_CFG['setup']['storagerouters'] class classproperty(property): def __get__(self, cls, owner): @@ -84,18 +47,3 @@ def api(cls): return OVSClient(cls.SETUP_CFG['ci']['grid_ip'], cls.SETUP_CFG['ci']['user']['api']['username'], 
cls.SETUP_CFG['ci']['user']['api']['password']) - - - def __init__(self): - super(CIConstants, self).__init__() - - - @classmethod - def get_shell_user(cls): - """ - Gets the user configured within the setup - :return: dict with the users credentials - :rtype: dict - """ - return {'username': cls.SETUP_CFG['ci']['user']['shell']['username'], - 'password': cls.SETUP_CFG['ci']['user']['shell']['password']} \ No newline at end of file diff --git a/helpers/disk.py b/helpers/disk.py index c589f0c..d9d18e2 100644 --- a/helpers/disk.py +++ b/helpers/disk.py @@ -18,21 +18,16 @@ from ovs.dal.lists.diskpartitionlist import DiskPartitionList from ..helpers.storagerouter import StoragerouterHelper + class DiskHelper(object): """ DiskHelper class """ - def __init__(self): - from ..helpers.storagerouter import StoragerouterHelper - - pass - @staticmethod def get_diskpartitions_by_guid(diskguid): """ Fetch disk partitions by disk guid - :param diskguid: ip address of a storagerouter :type diskguid: str :return: list of DiskPartition Objects @@ -45,9 +40,8 @@ def get_diskpartitions_by_guid(diskguid): def get_roles_from_disks(storagerouter_guid=None): """ Fetch disk roles from all disks with optional storagerouter_ip - - :param storagerouter_ip: ip address of a storage router - :type storagerouter_ip: str + :param storagerouter_guid: guid of a storage router + :type storagerouter_guid: str :return: list of lists with roles :rtype: list > list """ @@ -61,7 +55,6 @@ def get_roles_from_disks(storagerouter_guid=None): def get_disk_by_diskname(storagerouter_guid, disk_name): """ Get a disk object by storagerouter guid and disk name - :param storagerouter_guid: guid address of a storage router :type storagerouter_guid: str :param disk_name: name of a disk (e.g. 
sda) @@ -69,7 +62,6 @@ def get_disk_by_diskname(storagerouter_guid, disk_name): :return: disk object :rtype: ovs.dal.hybrids.Disk """ - storagerouter = StoragerouterHelper.get_storagerouter_by_guid(storagerouter_guid=storagerouter_guid) for disk in storagerouter.disks: if disk.name == disk_name: @@ -79,7 +71,6 @@ def get_disk_by_diskname(storagerouter_guid, disk_name): def get_roles_from_disk(storagerouter_guid, disk_name): """ Get the roles from a certain disk - :param storagerouter_guid: guid address of a storage router :type storagerouter_guid: str :param disk_name: name of a disk (e.g. sda) @@ -88,7 +79,6 @@ def get_roles_from_disk(storagerouter_guid, disk_name): :rtype: list """ disk = DiskHelper.get_disk_by_diskname(storagerouter_guid, disk_name) - roles_on_disk = [] if disk: for diskpartition in disk.partitions: diff --git a/helpers/hypervisor/hypervisor.py b/helpers/hypervisor/hypervisor.py index 4b88185..66a58b9 100644 --- a/helpers/hypervisor/hypervisor.py +++ b/helpers/hypervisor/hypervisor.py @@ -16,39 +16,73 @@ Hypervisor/ManagementCenter factory module Using the module requires libvirt api to be available on the MACHINE THAT EXECUTES THE CODE """ - from ovs_extensions.generic.filemutex import file_mutex +from ovs.lib.helpers.toolbox import Toolbox +from ...helpers.ci_constants import CIConstants -class HypervisorFactory(object): +class HypervisorFactory(CIConstants): """ HypervisorFactory class provides functionality to get abstracted hypervisor """ - hypervisors = {} - @staticmethod - def get(ip, username, password, hvtype): + @classmethod + def get(cls, hv_credentials=None): """ Returns the appropriate hypervisor client class for a given PMachine + :param hv_credentials: object that contains ip, user, password and hypervisor type + :type hv_credentials: HypervisorCredentials object """ - key = '{0}_{1}'.format(ip, username) - if key not in HypervisorFactory.hypervisors: - mutex = file_mutex('hypervisor_{0}'.format(key)) - try: - mutex.acquire(30) - 
if key not in HypervisorFactory.hypervisors: - if hvtype == 'VMWARE': - # Not yet tested. Needs to be rewritten - raise NotImplementedError("{0} has not yet been implemented".format(hvtype)) - from .hypervisors.vmware import VMware - hypervisor = VMware(ip, username, password) - elif hvtype == 'KVM': - from .hypervisors.kvm import KVM - hypervisor = KVM(ip, username, password) - else: - raise NotImplementedError('Hypervisor {0} is not yet supported'.format(hvtype)) - HypervisorFactory.hypervisors[key] = hypervisor - finally: - mutex.release() - return HypervisorFactory.hypervisors[key] + if hv_credentials is None: + return cls.get(HypervisorCredentials(ip=CIConstants.HYPERVISOR_INFO['ip'], + user=CIConstants.HYPERVISOR_INFO['user'], + password=CIConstants.HYPERVISOR_INFO['password'], + type=CIConstants.HYPERVISOR_INFO['type'])) + if not isinstance(hv_credentials, HypervisorCredentials): + raise TypeError('Credentials must be of type HypervisorCredentials') + return cls.hypervisors.get(hv_credentials, cls._add_hypervisor(hv_credentials)) + + @staticmethod + def _add_hypervisor(hypervisor_credentials): + ip = hypervisor_credentials.ip + username = hypervisor_credentials.user + password = hypervisor_credentials.password + hvtype = hypervisor_credentials.type + mutex = file_mutex('hypervisor_{0}'.format(hash(hypervisor_credentials))) + try: + mutex.acquire(30) + if hypervisor_credentials not in HypervisorFactory.hypervisors: + if hvtype == 'VMWARE': + # Not yet tested. 
Needs to be rewritten + raise NotImplementedError("{0} has not yet been implemented".format(hvtype)) + from .hypervisors.vmware import VMware + hypervisor = VMware(ip, username, password) + elif hvtype == 'KVM': + from .hypervisors.kvm import KVM + hypervisor = KVM(ip, username, password) + else: + raise NotImplementedError('Hypervisor {0} is not yet supported'.format(hvtype)) + HypervisorFactory.hypervisors[hypervisor_credentials] = hypervisor + return hypervisor + finally: + mutex.release() + + +class HypervisorCredentials(object): + def __init__(self, ip, user, password, type): + required_params = {'ip': (str, Toolbox.regex_ip), + 'user': (str, None), + 'password': (str, None), + 'type': (str, ['KVM', 'VMWARE'])} + Toolbox.verify_required_params(required_params, {'ip': ip, + 'user': user, + 'password': password, + 'type': type}) + self.ip = ip + self.user = user + self.password = password + self.type = type + + def __str__(self): + return 'hypervisor at ip {0} of type {1}'.format(self.ip, self.type) diff --git a/helpers/setupjsongenerator.py b/helpers/setupjsongenerator.py index d31b38f..58a3ac8 100644 --- a/helpers/setupjsongenerator.py +++ b/helpers/setupjsongenerator.py @@ -18,42 +18,14 @@ from ci.autotests import AutoTests from ovs.dal.hybrids.albabackend import AlbaBackend from ovs.dal.hybrids.diskpartition import DiskPartition +from ovs.extensions.storageserver.storagedriver import StorageDriverClient from ovs.lib.helpers.toolbox import Toolbox - class SetupJsonGenerator(object): """ - Class to automate construction of a setup.json file. 
- Attributes: - prop config - def dump_json_to_file - def def update_scenarios - def update_ci - def add_hypervisor - def remove_hypervisor - - def add_domain - def remove_domain - - def add_storagerouter - def remove_storagerouter - def add_disk_to_sr - def remove_disk_from_sr - def add_domain_to_sr - def remove_domain_from_sr - - def add_backend - def remove_backend - def add_preset_to_backend - def remove_preset_to_backend - def add_osd_to_backend - - def add_vpool - def remove_vpool - - def change_cache - + This provides class provides code to automate construction of a setup.json file. + Addition and removal of several components of the setup.json is provided """ HYPERV_KVM = 'KVM' VPOOL_COUNTER = 1 @@ -86,7 +58,8 @@ def dump_json_to_file(self, path): def update_scenarios(self, scenarios=None): """ Add scenarios to be scheduled in the setup. - :param scenarios: + :param scenarios: scenarios to add to the 'scenarios' section of the setup.json. + If left blank, by default all scenarios will be scheduled. :type scenarios: list """ if not isinstance(scenarios, list) and scenarios is not None: @@ -101,64 +74,55 @@ def update_scenarios(self, scenarios=None): self.config['scenarios'] = scenarios - def update_ci(self, passed_required_params, passed_optional_params=None): + def update_ci(self, ci_params): """ Set the ci constants of the setup file accordign to the passed parameters. 
:param passed_required_params: obligatory parameters for the setup file :type passed_required_params: dict :param passed_optional_params: optional parameters :type passed_optional_params: dict - """ - required_params_layout = {'setup': (bool, None, True), - 'grid_ip': (str, Toolbox.regex_ip, True)} - - default_params = {'validation': False, - 'cleanup': False, - 'send_to_testrail': True, - 'fail_on_failed_scenario': True, - 'scenarios': True, - 'version': 'andes', - 'config_manager': 'arakoon'} - - if passed_optional_params is None: - passed_optional_params = {} - - for key, value in default_params.iteritems(): - if key not in passed_optional_params.keys(): - passed_optional_params[key] = value - - optional_params_layout = {'validation': (bool, None, False), - 'cleanup': (bool, None, False), - 'send_to_testrail': (bool, None, False), - 'fail_on_failed_scenario': (bool, None, False), - 'setup_retries': (int, {'min': 1}, False), - 'scenarios': (bool, None, False), - 'scenario_retries': (int, {'min': 1}, False), - 'version': (str, ['andes', 'unstable', 'fargo', 'develop'], False), - 'config_manager': (str, 'arakoon', False)} - - Toolbox.verify_required_params(required_params=required_params_layout, actual_params=passed_required_params, verify_keys=True) - Toolbox.verify_required_params(required_params=optional_params_layout, actual_params=passed_optional_params, verify_keys=True) - - if os.system('ping -c 1 {}'.format(passed_required_params['grid_ip'])) != 0: - raise ValueError('No response from ip {0}'.format(required_params_layout['grid_ip'])) - - ci = {'setup': passed_required_params['setup'], - 'cleanup': passed_optional_params['cleanup'], - 'send_to_testrail': passed_optional_params['send_to_testrail'], - 'fail_on_failed_scenario': passed_optional_params['fail_on_failed_scenario'], - 'version': passed_optional_params['version'], - 'scenarios': passed_optional_params['scenarios'], + params_layout = {'setup': (bool, None, True), + 'grid_ip': (str, Toolbox.regex_ip, 
True), + 'validation': (bool, None, False), + 'cleanup': (bool, None, False), + 'send_to_testrail': (bool, None, False), + 'fail_on_failed_scenario': (bool, None, False), + 'scenarios': (bool, None, False), + 'scenario_retries': (int, {'min': 1}, False), + 'version': (str, ['andes', 'unstable', 'fargo', 'develop'], False), + 'config_manager': (str, 'arakoon', False)} + + all_params = {'validation': False, + 'cleanup': False, + 'send_to_testrail': True, + 'fail_on_failed_scenario': True, + 'scenarios': True, + 'scenario_retries': 1, + 'version': 'andes', + 'config_manager': 'arakoon'} + + all_params.update(ci_params) + Toolbox.verify_required_params(required_params=params_layout, actual_params=all_params, verify_keys=True) + + if os.system('ping -c 1 {}'.format(all_params['grid_ip'])) != 0: + raise ValueError('No response from ip {0}'.format(all_params['grid_ip'])) + + ci = {'setup': all_params['setup'], + 'cleanup': all_params['cleanup'], + 'send_to_testrail': all_params['send_to_testrail'], + 'fail_on_failed_scenario': all_params['fail_on_failed_scenario'], + 'version': all_params['version'], + 'scenarios': all_params['scenarios'], 'local_hypervisor': {'type': SetupJsonGenerator.HYPERV_KVM, 'user': 'root', 'password': 'rooter'}, - 'config_manager': passed_optional_params['config_manager'], + 'config_manager': all_params['config_manager'], 'user': {'shell': {'username': 'root', 'password': 'rooter'}, 'api': {'username': 'admin', 'password': 'admin'}}, - 'grid_ip': passed_required_params['grid_ip']} + 'grid_ip': all_params['grid_ip']} self._json_dict['ci'] = ci def add_hypervisor(self, hypervisor_ip, hypervisor_type=HYPERV_KVM, username='root', password='rooter', virtual_machines=None): @@ -178,33 +142,37 @@ def add_hypervisor(self, hypervisor_ip, hypervisor_type=HYPERV_KVM, username='ro """ if 'ci' not in self._json_dict: raise ValueError('CI constants have to be set before adding hypervisors') - self._validation_ip(hypervisor_ip) + self._validate_ip(hypervisor_ip) 
if virtual_machines is None: vm_ip = self.config['ci']['grid_ip'] - suffix = vm_ip.split('.', 1)[-1] - virtual_machines = {vm_ip: {'name': 'vm_{0}'.format(suffix), 'role': 'COMPUTE'}} + name = 'ubuntu_node_'+str(vm_ip.split('.', 2)[-1]).strip() + virtual_machines = {vm_ip: {'name': name, 'role': 'COMPUTE'}} if not isinstance(virtual_machines, dict): raise ValueError('Dict of virtual machines should contain entries like { ip: { `name`: `role`}}') for key, value in virtual_machines.iteritems(): - self._validation_ip(key) + self._validate_ip(key) hypervisor_dict = {'type': hypervisor_type, 'user': username, 'password': password, + 'ip': hypervisor_ip, 'vms': virtual_machines} self._ips.extend(virtual_machines.keys()) - if 'hypervisors' not in self.config['ci']: - self.config['ci']['hypervisors'] = {} - self.config['ci']['hypervisors'][hypervisor_ip] = hypervisor_dict + if 'hypervisor' not in self.config['ci']: + self.config['ci']['hypervisor'] = {} + self.config['ci']['hypervisor'] = hypervisor_dict def remove_hypervisor(self, hypervisor_ip): - try: - self.config['ci']['hypervisors'].pop(hypervisor_ip) - except Exception: - pass + """ + remove the hypervisor with the given ip, if present + :param hypervisor_ip: ip address of the hypervisor to remove + :type hypervisor_ip: str + """ + if self.config['ci']['hypervisor']['ip'] == hypervisor_ip: + self.config['ci'].pop('hypervisor') def add_domain(self, domain): """ @@ -215,9 +183,11 @@ def add_domain(self, domain): if not isinstance(domain, str): raise ValueError('domain is no string') self._domains.append(domain) - if 'domains' not in self.config.keys(): - self.config['domains'] = [] - self.config['domains'].append(domain) + if 'setup' not in self.config.keys(): + self.config['setup'] = {} + if 'domains' not in self.config['setup'].keys(): + self.config['setup']['domains'] = [] + self.config['setup']['domains'].append(domain) def remove_domain(self, domain): """ @@ -226,8 +196,10 @@ def remove_domain(self, domain): 
:type domain: str """ try: - self.config['domains'].remove(domain) - except Exception: + self.config['setup']['domains'].remove(domain) + if self.config['setup']['domains'] == []: + self.config['setup'].pop('domains') + except KeyError: pass def add_storagerouter(self, storagerouter_ip, hostname): @@ -238,16 +210,18 @@ def add_storagerouter(self, storagerouter_ip, hostname): :param hostname: hostname of the storagerouter :type hostname: str """ - self._validation_ip(storagerouter_ip) + self._validate_ip(storagerouter_ip) required_params = {'hostname': (str, None, True)} Toolbox.verify_required_params(required_params=required_params, actual_params={'hostname': hostname}, verify_keys=True) - if 'storagerouters' in self.config.keys(): - if storagerouter_ip in self.config['storagerouters']: + if 'setup' not in self.config.keys(): + self.config['setup'] = {} + if 'storagerouters' in self.config['setup'].keys(): + if storagerouter_ip in self.config['setup']['storagerouters']: raise ValueError('Storagerouter with given ip {0} already defined.'.format(storagerouter_ip)) else: - if 'storagerouters' not in self.config: - self.config['storagerouters'] = {} - self.config['storagerouters'][storagerouter_ip] = {'hostname': hostname} + if 'storagerouters' not in self.config['setup']: + self.config['setup']['storagerouters'] = {} + self.config['setup']['storagerouters'][storagerouter_ip] = {'hostname': hostname} def remove_storagerouter(self, storagerouter_ip): """ @@ -256,7 +230,7 @@ def remove_storagerouter(self, storagerouter_ip): :type storagerouter_ip: str """ try: - self.config['storagerouters'].pop(storagerouter_ip) + self.config['setup']['storagerouters'].pop(storagerouter_ip) except Exception: pass @@ -277,9 +251,9 @@ def add_disk_to_sr(self, storagerouter_ip, name, roles): if role not in DiskPartition.ROLES: raise ValueError('Provided role {0} is not an allowed role for disk {1}.'.format(role, name)) disk_dict = {name: {'roles': roles}} - if 'disks' not in 
self.config['storagerouters'][storagerouter_ip]: - self.config['storagerouters'][storagerouter_ip]['disks'] = {} - self.config['storagerouters'][storagerouter_ip]['disks'].update(disk_dict) + if 'disks' not in self.config['setup']['storagerouters'][storagerouter_ip]: + self.config['setup']['storagerouters'][storagerouter_ip]['disks'] = {} + self.config['setup']['storagerouters'][storagerouter_ip]['disks'].update(disk_dict) def remove_disk_from_sr(self, storagerouter_ip, name): """ @@ -290,7 +264,7 @@ def remove_disk_from_sr(self, storagerouter_ip, name): :type name: str """ try: - self.config['storagerouters'][storagerouter_ip]['disks'].pop(name) + self.config['setup']['storagerouters'][storagerouter_ip]['disks'].pop(name) except Exception: pass @@ -310,7 +284,7 @@ def add_domain_to_sr(self, storagerouter_ip, name, recovery=False): if name not in self._domains: raise ValueError('Invalid domain passed: {0}'.format(name)) - path = self.config['storagerouters'][storagerouter_ip] + path = self.config['setup']['storagerouters'][storagerouter_ip] if 'domains' not in path.keys(): path['domains'] = {} path = path['domains'] @@ -328,8 +302,7 @@ def remove_domain_from_sr(self, storagerouter_ip, name): :type name: str """ try: - _ = self.config['storagerouters'][storagerouter_ip]['domains']['domain_guids'] - _.remove(name) + self.config['setup']['storagerouters'][storagerouter_ip]['domains']['domain_guids'].remove(name) except Exception: pass @@ -356,15 +329,15 @@ def add_backend(self, backend_name, domains=None, scaling='LOCAL'): actual_params={'backend_name': backend_name, 'domains': domains, 'scaling': scaling}, verify_keys=True) - be_dict = {'name': backend_name, 'domains': {'domain_guids': domains}, 'scaling': scaling} - + if 'setup' not in self.config.keys(): + self.config['setup'] = {} self._backends.append(be_dict['name']) - if 'backends' not in self.config: - self.config['backends'] = [] - self.config['backends'].append(be_dict) + if 'backends' not in 
self.config['setup']: + self.config['setup']['backends'] = [] + self.config['setup']['backends'].append(be_dict) def remove_backend(self, backend_name): """ @@ -372,9 +345,9 @@ def remove_backend(self, backend_name): :param backend_name: name of the backend to remove :type backend_name: str """ - for backend in self.config['backends']: + for index, backend in enumerate(self.config['setup']['backends']): if backend['name'] == backend_name: - self.config['backends'].pop(self.config['backends'].index(backend)) + self.config['setup']['backends'].pop(index) def add_preset_to_backend(self, backend_name, preset_name, policies, compression='snappy', encryption='none', fragment_size=2097152): """ @@ -399,11 +372,11 @@ def add_preset_to_backend(self, backend_name, preset_name, policies, compression compression_options = ['snappy', 'bz2', 'none'] if compression not in compression_options: - raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('", "'.join(compression_options))) + raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('', ''.join(compression_options))) encryption_options = ['aes-cbc-256', 'aes-ctr-256', 'none'] if encryption not in encryption_options: - raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('", "'.join(encryption_options))) + raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('', ''.join(encryption_options))) if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3): raise ValueError('Fragment size should be a positive integer smaller than 1 GiB') @@ -428,11 +401,11 @@ def add_preset_to_backend(self, backend_name, preset_name, policies, compression 'fragment_size': fragment_size, } self._presets.append(preset_dict['name']) - for i in range(len(self.config['backends'])): - if self.config['backends'][i]['name'] == backend_name: - if 'presets' not in 
self.config['backends'][i]: - self.config['backends'][i]['presets'] = [] - self.config['backends'][i]['presets'].append(preset_dict) + for index, backend in enumerate(self.config['setup']['backends']): + if backend['name'] == backend_name: + if 'presets' not in backend: + self.config['setup']['backends'][index]['presets'] = [] + self.config['setup']['backends'][index]['presets'].append(preset_dict) def remove_preset_from_backend(self, backend_name, preset_name): """ @@ -443,13 +416,12 @@ def remove_preset_from_backend(self, backend_name, preset_name): :type preset_name: str """ try: - for i in range(len(self.config['backends'])): - if self.config['backends'][i]['name'] == backend_name: - if 'presets' in self.config['backends'][i]: - for j in range(len(self.config['backends'][i]['presets'])): - if self.config['backends'][i]['presets'][j]['name'] == preset_name: - self.config['backends'][i]['presets'].pop(j) - self.config['backends'].remove(i) + for index, backend in enumerate(self.config['setup']['backends']): + if backend['name'] == backend_name: + if 'presets' in backend: + for jndex, preset in enumerate(self.config['backends'][index]['presets']): + if preset['name'] == preset_name: + self.config['setup']['backends'][index]['presets'].pop(jndex) except Exception: pass @@ -481,9 +453,9 @@ def add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=No Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) osd_dict = {} - for i in range(len(self.config['backends'])): - if self.config['backends'][i]['name'] == backend_name: - scaling = self.config['backends'][i]['scaling'] + for index, backend in enumerate(self.config['setup']['backends']): + if backend['name'] == backend_name: + scaling = backend['scaling'] if scaling == 'LOCAL': if osds_on_disks is None: raise ValueError('Osd dictionary required') @@ -496,10 +468,10 @@ def add_osd_to_backend(self, backend_name, osds_on_disks=None, 
linked_backend=No osd_dict = {linked_backend: linked_preset} else: - print ValueError('invalid scaling ({0}) passed'.format(scaling)) - if 'osds' not in self.config['backends'][i]: - self.config['backends'][i]['osds'] = {} - self.config['backends'][i]['osds'].update(osd_dict) + raise ValueError('invalid scaling ({0}) passed'.format(scaling)) + if 'osds' not in backend: + self.config['setup']['backends'][index]['osds'] = {} + self.config['setup']['backends'][index]['osds'].update(osd_dict) def remove_osd_from_backend(self, osd_identifier, backend_name): """ @@ -510,9 +482,9 @@ def remove_osd_from_backend(self, osd_identifier, backend_name): :type osd_identifier: str """ try: - for i in range(len(self.config['backends'])): - if self.config['backends'][i]['name'] == backend_name: - self.config['backends'][i]['osds'].pop(osd_identifier) + for index, backend in enumerate(self.config['setup']['backends']): + if backend['name'] == backend_name: + self.config['setup']['backends'][index]['osds'].pop(osd_identifier) except Exception: pass @@ -549,7 +521,7 @@ def add_vpool(self, storagerouter_ip, backend_name, preset_name, storage_ip, vpo Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) self._valid_storagerouter(storagerouter_ip=storagerouter_ip) - self._validation_ip(ip=storage_ip) + self._validate_ip(ip=storage_ip) if backend_name not in self._backends: raise ValueError('Provided backend {0} not in known backends'.format(backend_name)) if preset_name not in self._presets: @@ -559,25 +531,13 @@ def add_vpool(self, storagerouter_ip, backend_name, preset_name, storage_ip, vpo 'storage_ip': storage_ip, 'proxies': 1, 'fragment_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False}, - 'location': 'disk' - }, + 'location': 'disk'}, 'block_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False}, - 'location': 'disk' - }, - 'storagedriver': {'sco_size': 4, - 'cluster_size': 4, - 
'volume_write_buffer': 512, - 'strategy': 'none', - 'global_write_buffer': 20, - 'global_read_buffer': 0, - 'deduplication': 'non_dedupe', - 'dtl_transport': 'tcp', - 'dtl_mode': 'sync' - } + 'location': 'disk'} } - if 'vpools' not in self.config['storagerouters'][storagerouter_ip]: - self.config['storagerouters'][storagerouter_ip]['vpools'] = {} - self.config['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict + if 'vpools' not in self.config['setup']['storagerouters'][storagerouter_ip]: + self.config['setup']['storagerouters'][storagerouter_ip]['vpools'] = {} + self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict def remove_vpool(self, storagerouter_ip, vpool_name): """ @@ -588,7 +548,7 @@ def remove_vpool(self, storagerouter_ip, vpool_name): :type vpool_name: str """ try: - self.config['storagerouters'][storagerouter_ip]['vpools'].pop(vpool_name) + self.config['setup']['storagerouters'][storagerouter_ip]['vpools'].pop(vpool_name) except Exception: pass @@ -622,7 +582,7 @@ def change_cache(self, storagerouter_ip, vpool, block_cache=True, fragment_cache 'on_write': on_write} Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) try: - vpool = self.config['storagerouters'][storagerouter_ip]['vpools'][vpool] + vpool = self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool] except KeyError: raise ValueError('Vpool {0} not found'.format(vpool)) if block_cache is True: @@ -632,12 +592,66 @@ def change_cache(self, storagerouter_ip, vpool, block_cache=True, fragment_cache vpool['fragment_cache']['strategy']['cache_on_read'] = on_read vpool['fragment_cache']['strategy']['cache_on_write'] = on_write + def update_storagedriver_of_vpool(self, sr_ip, vpool_name, sr_params=None): + ''' + Update all or some data of a storagedriver, assigned to a vpool on a specific storagerouter. 
+ :param sr_ip: ip of the storagerouter on which the vpool is located + :type sr_ip: str + :param vpool_name: name of the vpool of which to update the storagedriver data + :type vpool_name: str + :param sr_params: parameters to update of the referenced storagedriver + :type sr_params: dict + ''' + required_params = {'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()), + 'cluster_size': (int, StorageDriverClient.CLUSTER_SIZES), + 'volume_write_buffer': (int, {'min': 128, 'max': 10240}, False), + 'global_read_buffer': (int, {'min': 128, 'max': 10240}, False), + 'strategy': (str, None, False), + 'deduplication': (str, None, False), + 'dtl_transport': (str, StorageDriverClient.VPOOL_DTL_TRANSPORT_MAP.keys()), + 'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys())} + + default_params = {'sco_size': 4, + 'cluster_size': 4, + 'volume_write_buffer': 512, + 'strategy': 'none', + 'global_write_buffer': 128, + 'global_read_buffer': 128, + 'deduplication': 'non_dedupe', + 'dtl_transport': 'tcp', + 'dtl_mode': 'sync'} + + if sr_params is None: + sr_params = {} + default_params.update(sr_params) + if not isinstance(default_params, dict): + raise ValueError('Parameters should be of type "dict"') + Toolbox.verify_required_params(required_params, default_params) + if sr_ip not in self.config['setup']['storagerouters'].keys(): + raise KeyError('Storagerouter with ip is not defined') + if vpool_name not in self.config['setup']['storagerouters'][sr_ip]['vpools']: + raise KeyError('Vpool with name {0} is not defined on storagerouter with ip {1}'.format(vpool_name, sr_ip)) + self.config['setup']['storagerouters'][sr_ip]['vpools'][vpool_name]['storagedriver'] = default_params + + def remove_storagedriver_from_vpool(self, sr_ip, vpool_name): + ''' + Remove the storagedriver details on given vpool of given storagerouter. 
+ :param sr_ip: ip of the storagerouter on which the vpool is located + :type sr_ip: str + :param vpool_name: name of the vpool of which to update the storagedriver data + :type vpool_name: str + ''' + try: + self.config['setup']['storagerouters'][sr_ip]['vpools'][vpool_name].pop('storagedriver') + except Exception: + pass + def _valid_storagerouter(self, storagerouter_ip): - self._validation_ip(storagerouter_ip) - if storagerouter_ip not in self.config['storagerouters']: + self._validate_ip(storagerouter_ip) + if storagerouter_ip not in self.config['setup']['storagerouters']: raise ValueError('Storagerouter with ip {0} not found in json'.format(storagerouter_ip)) - def _validation_ip(self, ip): + def _validate_ip(self, ip): required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True)} try: Toolbox.verify_required_params(required_params=required_params, actual_params={'storagerouter_ip': ip}, verify_keys=True) @@ -671,5 +685,5 @@ def check_policy(self): if 0 in clone.values(): raise ValueError('Policy: {0}: {1} cannot be equal to zero'.format(self.get_policy_as_list(), ''.join([i[0] for i in clone.items() if i[1] == 0]))) - for i in policies: - _Policy(i).check_policy() + for p in policies: + _Policy(p).check_policy() diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py index 22503d4..7d006d5 100644 --- a/helpers/storagerouter.py +++ b/helpers/storagerouter.py @@ -119,3 +119,32 @@ def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None): raise ValueError('No guid or ip passed.') task_id = cls.api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid), data=None) return cls.api.wait_for_task(task_id=task_id, timeout=timeout) + + @classmethod + def get_storagerouters_by_role(cls): + """ + Gets storagerouters based on roles + :return: + """ + voldr_str_1 = None # Will act as volumedriver node + voldr_str_2 = None # Will act as volumedriver node + compute_str = None # Will act as compute node + if 
isinstance(cls.HYPERVISOR_INFO, dict): # Hypervisor section is filled in -> VM environment + nodes_info = {} + for hv_ip, hv_info in cls.HYPERVISOR_INFO['vms'].iteritems(): + nodes_info[hv_ip] = hv_info + elif cls.SETUP_CFG['ci'].get('nodes') is not None: # Physical node section -> Physical environment + nodes_info = cls.SETUP_CFG['ci']['nodes'] + else: + raise RuntimeError('Unable to fetch node information. Either hypervisor section or node section is missing!') + for node_ip, node_details in nodes_info.iteritems(): + if node_details['role'] == "VOLDRV": + if voldr_str_1 is None: + voldr_str_1 = StoragerouterHelper.get_storagerouter_by_ip(node_ip) + elif voldr_str_2 is None: + voldr_str_2 = StoragerouterHelper.get_storagerouter_by_ip(node_ip) + elif node_details['role'] == "COMPUTE" and compute_str is None: + compute_str = StoragerouterHelper.get_storagerouter_by_ip(node_ip) + assert voldr_str_1 is not None and voldr_str_2 is not None and compute_str is not None,\ + 'Could not fetch 2 storagedriver nodes and 1 compute node based on the setup.json config.' 
+ return voldr_str_1, voldr_str_2, compute_str \ No newline at end of file diff --git a/helpers/tests/jsongeneratortestcase.py b/helpers/tests/jsongeneratortestcase.py index 075bb42..1e5abda 100644 --- a/helpers/tests/jsongeneratortestcase.py +++ b/helpers/tests/jsongeneratortestcase.py @@ -31,23 +31,26 @@ def test_structure(self): self.assertTrue(isinstance(self.generator.config, dict)) def test_model_ci(self): - self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1}) self.assertTrue(isinstance(self.generator.config['ci']['setup'], bool)) self.assertEquals(len(self.generator.config['ci']), 10) def test_add_hypervisor(self): - self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1}) with self.assertRaises(ValueError): self.generator.add_hypervisor(hypervisor_ip='5') self.generator.add_hypervisor(hypervisor_ip=self.ip_1) self.assertEquals(len(self.generator.config['ci']), 11) + self.assertTrue('vms' in self.generator.config['ci']['hypervisor'].keys()) + self.assertTrue('ubuntu_node_0.1' in self.generator.config['ci']['hypervisor']['vms'][self.ip_1]['name']) + def test_remove_hypervisor(self): - self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1}) self.generator.add_hypervisor(hypervisor_ip=self.ip_1) - self.generator.add_hypervisor(hypervisor_ip=self.ip_2) self.generator.remove_hypervisor(hypervisor_ip=self.ip_2) - self.assertEquals(len(self.generator.config['ci']['hypervisors']), 1) + self.generator.add_hypervisor(hypervisor_ip=self.ip_1) + self.assertEquals(len(self.generator.config['ci']['hypervisor']), 5) def test_model_scenarios(self): self.generator.update_scenarios() @@ -58,7 +61,7 @@ def test_model_scenarios(self): def test_add_domain(self): 
self.generator.add_domain('domain1') self.generator.add_domain('domain2') - self.assertEquals(len(self.generator.config['domains']), 2) + self.assertEquals(len(self.generator.config['setup']['domains']), 2) with self.assertRaises(ValueError): self.generator.add_domain(7) @@ -66,7 +69,7 @@ def test_remove_domain(self): self.generator.add_domain('domain1') self.generator.add_domain('domain2') self.generator.remove_domain('domain1') - self.assertEquals(len(self.generator.config['domains']), 1) + self.assertEquals(len(self.generator.config['setup']['domains']), 1) def test_storagerouter_addition_removal(self): self.generator.add_domain('domain1') @@ -74,66 +77,66 @@ def test_storagerouter_addition_removal(self): with self.assertRaises(ValueError): self.generator.add_storagerouter(storagerouter_ip='100', hostname='hostname') self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname') - self.assertTrue(self.ip_1 in self.generator.config['storagerouters'].keys()) + self.assertTrue(self.ip_1 in self.generator.config['setup']['storagerouters'].keys()) self.generator.add_storagerouter(storagerouter_ip=self.ip_2, hostname='hostname') self.generator.remove_storagerouter(storagerouter_ip=self.ip_2) - self.assertFalse(self.ip_2 in self.generator.config['storagerouters'].keys()) + self.assertFalse(self.ip_2 in self.generator.config['setup']['storagerouters'].keys()) with self.assertRaises(ValueError): self.generator.add_disk_to_sr(storagerouter_ip='5', name='disk1', roles=['SCRUB', 'DTL']) with self.assertRaises(ValueError): self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['bla']) self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['SCRUB', 'DTL']) - self.assertTrue('disk1' in self.generator.config['storagerouters'][self.ip_1]['disks']) - self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['disks']['disk1']['roles']), 2) + self.assertTrue('disk1' in 
self.generator.config['setup']['storagerouters'][self.ip_1]['disks']) + self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['disks']['disk1']['roles']), 2) self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk2', roles=['DB']) self.generator.remove_disk_from_sr(storagerouter_ip=self.ip_1, name='disk2') - self.assertFalse('disk2' in self.generator.config['storagerouters'][self.ip_1]) + self.assertFalse('disk2' in self.generator.config['setup']['storagerouters'][self.ip_1]) self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1') self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1', recovery=True) self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2') self.generator.remove_domain_from_sr(storagerouter_ip=self.ip_1, name='domain2') - self.assertFalse('domain2' in self.generator.config['storagerouters'][self.ip_1]) + self.assertFalse('domain2' in self.generator.config['setup']['storagerouters'][self.ip_1]) - self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['domain_guids']), 1) - self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 1) + self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['domain_guids']), 1) + self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 1) self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2') self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2', recovery=True) - self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['domain_guids']), 2) - self.assertEquals(len(self.generator.config['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 2) + self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['domain_guids']), 2) + 
self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 2) def test_backend_addition_removal(self): self.generator.add_domain('domain1') self.generator.add_domain('domain2') self.generator.add_backend(backend_name='mybackend', domains=['domain1']) - self.assertItemsEqual(self.generator.config['backends'][0].keys(), ['name', 'domains', 'scaling']) + self.assertItemsEqual(self.generator.config['setup']['backends'][0].keys(), ['name', 'domains', 'scaling']) self.generator.add_backend(backend_name='mybackend02', domains=['domain1'], scaling='GLOBAL') - self.assertItemsEqual(self.generator.config['backends'][1].keys(), ['name', 'domains', 'scaling']) + self.assertItemsEqual(self.generator.config['setup']['backends'][1].keys(), ['name', 'domains', 'scaling']) self.generator.add_preset_to_backend(backend_name='mybackend02', preset_name='mypreset', policies=[[1, 2, 2, 1]]) - self.assertEqual(self.generator.config['backends'][1]['name'], 'mybackend02') + self.assertEqual(self.generator.config['setup']['backends'][1]['name'], 'mybackend02') with self.assertRaises(ValueError): self.generator.add_preset_to_backend(backend_name='non-existing_backend', preset_name='mypreset', policies=[1, 2, 2, 1]) self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_1: {'vdb': 2}}) - self.assertEquals(len(self.generator.config['backends'][0]['osds']), 1) + self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 1) self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_2: {'vdb': 2}}) - self.assertEquals(len(self.generator.config['backends'][0]['osds']), 2) + self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 2) self.generator.remove_osd_from_backend(backend_name='mybackend', osd_identifier=self.ip_2) - self.assertEquals(len(self.generator.config['backends'][0]['osds']), 1) + 
self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 1) - self.assertEqual(self.generator.config['backends'][0]['osds'][self.ip_1]['vdb'], 2) + self.assertEqual(self.generator.config['setup']['backends'][0]['osds'][self.ip_1]['vdb'], 2) with self.assertRaises(ValueError): self.generator.add_osd_to_backend(backend_name='mybackend02', osds_on_disks={self.ip_1: {'vdb': 2}}) self.generator.add_osd_to_backend(backend_name='mybackend02', linked_backend='mybackend', linked_preset='mypreset') - self.assertEqual(self.generator.config['backends'][1]['osds']['mybackend'], 'mypreset') + self.assertEqual(self.generator.config['setup']['backends'][1]['osds']['mybackend'], 'mypreset') self.generator.remove_backend('mybackend02') - self.assertNotEquals(len(self.generator.config['backends']), 3) + self.assertNotEquals(len(self.generator.config['setup']['backends']), 3) def test_vpool_addition_removal(self): vpoolname = 'vpool01' @@ -147,15 +150,26 @@ def test_vpool_addition_removal(self): self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='non-existing_preset', storage_ip=self.ip_1) self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1) - self.assertTrue(vpoolname in self.generator.config['storagerouters'][self.ip_1]['vpools'].keys()) - self.assertTrue('storagedriver' in self.generator.config['storagerouters'][self.ip_1]['vpools'][vpoolname].keys()) - self.generator.add_vpool(storagerouter_ip=self.ip_1, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1, vpool_name='vpool1000') self.generator.remove_vpool(storagerouter_ip=self.ip_1, vpool_name='vpool1000') - self.assertFalse('vpool1000' in self.generator.config['storagerouters'][self.ip_1]['vpools']) + self.assertFalse('vpool1000' in self.generator.config['setup']['storagerouters'][self.ip_1]['vpools']) + + def 
test_storagedriver_addition_removal(self): + vpoolname = 'vpool01' + self.generator.add_domain('domain1') + self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname') + self.generator.add_backend(backend_name='mybackend', domains=['domain1']) + self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]]) + self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1) + self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_1, vpool_name=vpoolname, sr_params={'sco_size': 8}) + path = self.generator.config['setup']['storagerouters'][self.ip_1]['vpools'][vpoolname] + self.assertEquals(path['storagedriver']['sco_size'], 8) + self.assertTrue(isinstance(path['storagedriver']['deduplication'], str)) + self.generator.remove_storagedriver_from_vpool(sr_ip=self.ip_1, vpool_name=vpoolname) + self.assertFalse('storagedriver' in path.keys()) def test_full_flow(self): - self.generator.update_ci(passed_required_params={'setup': True, 'grid_ip': self.ip_1}) + self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1}) self.generator.add_hypervisor(hypervisor_ip=self.ip_1, virtual_machines={self.ip_2: {'name': 'ubuntu16.04-ovsnode01-setup1', 'role': 'COMPUTE'}, @@ -202,6 +216,7 @@ def test_full_flow(self): self.generator.add_vpool(storagerouter_ip=self.ip_2, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', block_cache=True, fragment_cache=False, on_write=False) self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', fragment_cache=True, block_cache=False, on_read=False, on_write=True) + self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_2, vpool_name='myvpool01', sr_params={'sco_size': 8}) # add storagerouter2 @@ -216,6 +231,7 @@ def test_full_flow(self): 
self.generator.add_vpool(storagerouter_ip=self.ip_3, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) self.generator.change_cache(storagerouter_ip=self.ip_3, vpool='myvpool01', fragment_cache=True, block_cache=True, on_write=False, on_read=True) + self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_3, vpool_name='myvpool01') # add storagerouter 3 @@ -228,20 +244,22 @@ def test_full_flow(self): self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdb', roles=['DB']) self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdc', roles=['SCRUB']) self.generator.add_vpool(storagerouter_ip=self.ip_4, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1) + self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_4, vpool_name='myvpool01', sr_params={'global_read_buffer': 256}) expected_output = {u'ci': {u'cleanup': False, u'config_manager': u'arakoon', u'fail_on_failed_scenario': True, u'grid_ip': u'127.0.0.1', - u'hypervisors': {u'127.0.0.1': {u'password': u'rooter', - u'type': u'KVM', - u'user': u'root', - u'vms': {u'127.0.0.2': {u'name': u'ubuntu16.04-ovsnode01-setup1', - u'role': u'COMPUTE'}, - u'127.0.0.3': {u'name': u'ubuntu16.04-ovsnode02-setup1', - u'role': u'VOLDRV'}, - u'127.0.0.4': {u'name': u'ubuntu16.04-ovsnode03-setup1', - u'role': u'VOLDRV'}}}}, + u'hypervisor': {u'password': u'rooter', + u'type': u'KVM', + u'user': u'root', + u'ip': u'127.0.0.1', + u'vms': {u'127.0.0.2': {u'name': u'ubuntu16.04-ovsnode01-setup1', + u'role': u'COMPUTE'}, + u'127.0.0.3': {u'name': u'ubuntu16.04-ovsnode02-setup1', + u'role': u'VOLDRV'}, + u'127.0.0.4': {u'name': u'ubuntu16.04-ovsnode03-setup1', + u'role': u'VOLDRV'}}}, u'local_hypervisor': {u'password': u'rooter', u'type': u'KVM', u'user': u'root'}, @@ -252,118 +270,120 @@ def test_full_flow(self): u'shell': {u'password': u'rooter', u'username': u'root'}}, u'version': u'andes'}, u'scenarios': 
[u'ALL'], - u'backends': [{u'domains': {u'domain_guids': [u'Roubaix']}, - u'name': u'mybackend', - u'osds': {u'127.0.0.2': {u'sde': 2, u'sdf': 2}, - u'127.0.0.3': {u'sde': 2, u'sdf': 2}, - u'127.0.0.4': {u'sde': 2, u'sdf': 2}}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'LOCAL'}, - {u'domains': {u'domain_guids': [u'Gravelines']}, - u'name': u'mybackend02', - u'osds': {u'127.0.0.2': {u'sdg': 2}, - u'127.0.0.3': {u'sdg': 2}, - u'127.0.0.4': {u'sdg': 2}}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'LOCAL'}, - {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']}, - u'name': u'mybackend-global', - u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'}, - u'presets': [{u'compression': u'snappy', - u'encryption': u'none', - u'fragment_size': 2097152, - u'name': u'mypreset', - u'policies': [[1, 2, 2, 1]]}], - u'scaling': u'GLOBAL'}], - - u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'], - u'storagerouters': {u'127.0.0.2': {u'disks': {u'sda': {u'roles': [u'WRITE', - u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Roubaix'], - u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']}, - u'hostname': u'ovs-node-1-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': True}}, - u'preset': u'mypreset', - u'proxies': 1, - u'storage_ip': u'127.0.0.1', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - 
u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}}, - u'127.0.0.3': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Gravelines'], - u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, - u'hostname': u'ovs-node-2-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, - u'preset': u'mypreset', - u'proxies': 1, - u'storage_ip': u'127.0.0.1', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}}, - u'127.0.0.4': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, - u'sdb': {u'roles': [u'DB']}, - u'sdc': {u'roles': [u'SCRUB']}}, - u'domains': {u'domain_guids': [u'Gravelines'], - u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, - u'hostname': u'ovs-node-3-1604', - u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', - u'block_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, - u'fragment_cache': {u'location': u'disk', - u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, - u'preset': u'mypreset', - u'proxies': 1, - u'storage_ip': u'127.0.0.1', - u'storagedriver': {u'cluster_size': 4, - u'dtl_mode': u'sync', - u'dtl_transport': u'tcp', - u'global_write_buffer': 20, - u'global_read_buffer': 0, - u'deduplication': "non_dedupe", - u'strategy': "none", - u'sco_size': 4, - u'volume_write_buffer': 512}}}} - } + u'setup': { + u'backends': [{u'domains': {u'domain_guids': [u'Roubaix']}, + u'name': 
u'mybackend', + u'osds': {u'127.0.0.2': {u'sde': 2, u'sdf': 2}, + u'127.0.0.3': {u'sde': 2, u'sdf': 2}, + u'127.0.0.4': {u'sde': 2, u'sdf': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Gravelines']}, + u'name': u'mybackend02', + u'osds': {u'127.0.0.2': {u'sdg': 2}, + u'127.0.0.3': {u'sdg': 2}, + u'127.0.0.4': {u'sdg': 2}}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'LOCAL'}, + {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']}, + u'name': u'mybackend-global', + u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'}, + u'presets': [{u'compression': u'snappy', + u'encryption': u'none', + u'fragment_size': 2097152, + u'name': u'mypreset', + u'policies': [[1, 2, 2, 1]]}], + u'scaling': u'GLOBAL'}], + + u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'], + u'storagerouters': {u'127.0.0.2': {u'disks': {u'sda': {u'roles': [u'WRITE', + u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Roubaix'], + u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']}, + u'hostname': u'ovs-node-1-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': True}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 128, + u'global_read_buffer': 128, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 8, + 
u'volume_write_buffer': 512}}}}, + u'127.0.0.3': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-2-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': True, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 128, + u'global_read_buffer': 128, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}, + u'127.0.0.4': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']}, + u'sdb': {u'roles': [u'DB']}, + u'sdc': {u'roles': [u'SCRUB']}}, + u'domains': {u'domain_guids': [u'Gravelines'], + u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']}, + u'hostname': u'ovs-node-3-1604', + u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global', + u'block_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'fragment_cache': {u'location': u'disk', + u'strategy': {u'cache_on_read': False, u'cache_on_write': False}}, + u'preset': u'mypreset', + u'proxies': 1, + u'storage_ip': u'127.0.0.1', + u'storagedriver': {u'cluster_size': 4, + u'dtl_mode': u'sync', + u'dtl_transport': u'tcp', + u'global_write_buffer': 128, + u'global_read_buffer': 256, + u'deduplication': "non_dedupe", + u'strategy': "none", + u'sco_size': 4, + u'volume_write_buffer': 512}}}}} + } } self.assertDictEqual(self.generator.config['ci'], expected_output[u'ci']) - self.assertEqual(self.generator.config['domains'], 
expected_output[u'domains']) - self.assertDictEqual(self.generator.config['storagerouters'], expected_output[u'storagerouters']) - self.assertEqual(self.generator.config['backends'], expected_output[u'backends']) + self.assertEqual(self.generator.config['setup']['domains'], expected_output['setup'][u'domains']) + self.assertDictEqual(self.generator.config['setup']['storagerouters'], expected_output['setup'][u'storagerouters']) + self.assertEqual(self.generator.config['setup']['backends'], expected_output['setup'][u'backends']) self.assertDictEqual(self.generator.config, expected_output) + if __name__ == '__main__': unittest.main() diff --git a/helpers/vdisk.py b/helpers/vdisk.py index fa8c1ba..0ace9c1 100644 --- a/helpers/vdisk.py +++ b/helpers/vdisk.py @@ -108,6 +108,7 @@ def get_snapshot_by_guid(snapshot_guid, vdisk_name, vpool_name): .format(snapshot_guid, vdisk_name, vpool_name)) + @classmethod def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIMEOUT): """ Fetch the config parameters of a vDisk @@ -145,6 +146,7 @@ def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIM return task_result[1] + @classmethod def scrub_vdisk(cls, vdisk_guid, timeout=15 * 60, wait=True): """ Scrub a specific vdisk diff --git a/setup/arakoon.py b/setup/arakoon.py index b990d67..cbef2a5 100644 --- a/setup/arakoon.py +++ b/setup/arakoon.py @@ -21,6 +21,8 @@ from ovs.lib.alba import AlbaController from ..helpers.backend import BackendHelper from ..validate.decorators import required_backend, required_arakoon_cluster +from ..validate.backend import BackendValidation + class ArakoonSetup(object): @@ -159,3 +161,44 @@ def checkup_nsm_hosts(albabackend_name, amount): """ alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name) return AlbaController.nsm_checkup(backend_guid=alba_backend_guid, min_nsms=int(amount)) + + + @staticmethod + def setup_external_arakoons(backend): + """ + Setup external arakoons for a 
backend + + :param backend: all backend details + :type backend: dict + :return: mapped external arakoons + :rtype: dict + """ + + # if backend does not exists, deploy the external arakoons + if not BackendValidation.check_backend(backend_name=backend['name']): + external_arakoon_mapping = {} + for ip, arakoons in backend['external_arakoon'].iteritems(): + for arakoon_name, arakoon_settings in arakoons.iteritems(): + # check if we already created one or not + if arakoon_name not in external_arakoon_mapping: + # if not created yet, create one and map it + external_arakoon_mapping[arakoon_name] = {} + external_arakoon_mapping[arakoon_name]['master'] = ip + external_arakoon_mapping[arakoon_name]['all'] = [ip] + ArakoonSetup.add_arakoon(cluster_name=arakoon_name, storagerouter_ip=ip, + cluster_basedir=arakoon_settings['base_dir'], + service_type=arakoon_settings['type']) + else: + # if created, extend it and map it + external_arakoon_mapping[arakoon_name]['all'].append(ip) + ArakoonSetup.extend_arakoon(cluster_name=arakoon_name, + master_storagerouter_ip=external_arakoon_mapping[arakoon_name]['master'], + storagerouter_ip=ip, + cluster_basedir=arakoon_settings['base_dir'], + service_type=arakoon_settings['type'], + clustered_nodes=external_arakoon_mapping[arakoon_name]['all']) + return external_arakoon_mapping + else: + ArakoonSetup.LOGGER.info("Skipping external arakoon creation because backend `{0}` already exists" + .format(backend['name'])) + return \ No newline at end of file diff --git a/setup/vdisk.py b/setup/vdisk.py index 4641bd3..3449804 100644 --- a/setup/vdisk.py +++ b/setup/vdisk.py @@ -335,7 +335,7 @@ def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, timeout=ROLLB @classmethod @required_vdisk - def set_config_params(vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT): + def set_config_params(cls, vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT): """ Rollback a vdisk to a certain snapshot From 
2c33b2f34a632ff32f7e3e21878fa5f04a1104f1 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 2 Jan 2018 10:46:58 +0100 Subject: [PATCH 15/28] fix review: import comments --- helpers/backend.py | 2 +- helpers/fstab.py | 2 +- helpers/storagerouter.py | 1 - remove/roles.py | 1 - setup/arakoon.py | 1 - setup/backend.py | 1 - 6 files changed, 2 insertions(+), 6 deletions(-) diff --git a/helpers/backend.py b/helpers/backend.py index 6197de4..57c53a5 100644 --- a/helpers/backend.py +++ b/helpers/backend.py @@ -162,7 +162,7 @@ def get_backend_local_stack(cls, albabackend_name): 'contents': 'local_stack', } return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)), - params={'queryparams': options}) + params={'queryparams': options}) @staticmethod def get_alba_backends(): diff --git a/helpers/fstab.py b/helpers/fstab.py index 40dac4c..10b99ba 100644 --- a/helpers/fstab.py +++ b/helpers/fstab.py @@ -18,7 +18,7 @@ from ovs.extensions.generic.system import System -class FstabHelper(): +class FstabHelper(object): """ Class to help with Fstab manipulations Inherits from file class diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py index 7d006d5..6677401 100644 --- a/helpers/storagerouter.py +++ b/helpers/storagerouter.py @@ -13,7 +13,6 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
- from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.lists.storagerouterlist import StorageRouterList from ovs.extensions.generic.logger import Logger diff --git a/remove/roles.py b/remove/roles.py index 61891d2..beba8ef 100644 --- a/remove/roles.py +++ b/remove/roles.py @@ -24,7 +24,6 @@ from ..setup.roles import RoleSetup - class RoleRemover(CIConstants): LOGGER = Logger("remove-ci_role_remover") diff --git a/setup/arakoon.py b/setup/arakoon.py index cbef2a5..52fe0cc 100644 --- a/setup/arakoon.py +++ b/setup/arakoon.py @@ -24,7 +24,6 @@ from ..validate.backend import BackendValidation - class ArakoonSetup(object): LOGGER = Logger("setup-ci_arakoon_setup") diff --git a/setup/backend.py b/setup/backend.py index e12477c..48103fc 100644 --- a/setup/backend.py +++ b/setup/backend.py @@ -14,7 +14,6 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. import time - from ovs.extensions.generic.logger import Logger from ..helpers.albanode import AlbaNodeHelper from ..helpers.backend import BackendHelper From 216bd4af61af1c4753aaf81ab277470fc96526ee Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 2 Jan 2018 12:03:28 +0100 Subject: [PATCH 16/28] fix review: import comments --- ci/__init__.py | 19 ------------------- .../openvstorage-automation-lib.install | 1 - .../openvstorage-automation-lib.postinst | 3 +++ 3 files changed, 3 insertions(+), 20 deletions(-) delete mode 100644 ci/__init__.py create mode 100644 packaging/debian/debian/openvstorage-automation-lib.postinst diff --git a/ci/__init__.py b/ci/__init__.py deleted file mode 100644 index 8ae97b6..0000000 --- a/ci/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2016 iNuron NV -# -# This file is part of Open vStorage Open Source Edition (OSE), -# as available from -# -# http://www.openvstorage.org and -# http://www.openvstorage.com. 
-# -# This file is free software; you can redistribute it and/or modify it -# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) -# as published by the Free Software Foundation, in version 3 as it comes -# in the LICENSE.txt file of the Open vStorage OSE distribution. -# -# Open vStorage is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY of any kind. - -""" -Init -""" diff --git a/packaging/debian/debian/openvstorage-automation-lib.install b/packaging/debian/debian/openvstorage-automation-lib.install index b6ce271..283228f 100644 --- a/packaging/debian/debian/openvstorage-automation-lib.install +++ b/packaging/debian/debian/openvstorage-automation-lib.install @@ -1,4 +1,3 @@ -ci opt/OpenvStorage/ci/ helpers opt/OpenvStorage/ci/api_lib/ setup opt/OpenvStorage/ci/api_lib/ remove opt/OpenvStorage/ci/api_lib/ diff --git a/packaging/debian/debian/openvstorage-automation-lib.postinst b/packaging/debian/debian/openvstorage-automation-lib.postinst new file mode 100644 index 0000000..03efbb2 --- /dev/null +++ b/packaging/debian/debian/openvstorage-automation-lib.postinst @@ -0,0 +1,3 @@ +#!/bin/bash + +touch /opt/OpenvStorage/ci/__init__.py \ No newline at end of file From 8136fcabaf5492fb1c4ba35391d913f31927703a Mon Sep 17 00:00:00 2001 From: simonOpenV <33058909+simonOpenV@users.noreply.github.com> Date: Tue, 2 Jan 2018 13:25:47 +0100 Subject: [PATCH 17/28] Update openvstorage-automation-lib.postinst --- packaging/debian/debian/openvstorage-automation-lib.postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/debian/debian/openvstorage-automation-lib.postinst b/packaging/debian/debian/openvstorage-automation-lib.postinst index 03efbb2..3e82486 100644 --- a/packaging/debian/debian/openvstorage-automation-lib.postinst +++ b/packaging/debian/debian/openvstorage-automation-lib.postinst @@ -1,3 +1,3 @@ #!/bin/bash -touch /opt/OpenvStorage/ci/__init__.py \ No newline at end of file +touch 
/opt/OpenvStorage/ci/__init__.py From 29368a62a9e65dd4f33cac8813a92ee81797a19f Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Wed, 3 Jan 2018 11:25:20 +0100 Subject: [PATCH 18/28] added *args and **kwargs for backwards compatibility with the now obsolete 'api' param --- helpers/albanode.py | 2 +- helpers/backend.py | 4 +- helpers/storagerouter.py | 2 +- helpers/vdisk.py | 4 +- remove/backend.py | 156 ++------------------------------------- remove/roles.py | 2 +- remove/vdisk.py | 10 +-- remove/vpool.py | 2 +- setup/backend.py | 20 ++--- setup/domain.py | 6 +- setup/roles.py | 2 +- setup/vdisk.py | 16 ++-- 12 files changed, 41 insertions(+), 185 deletions(-) diff --git a/helpers/albanode.py b/helpers/albanode.py index 453a234..9e5dd94 100644 --- a/helpers/albanode.py +++ b/helpers/albanode.py @@ -29,7 +29,7 @@ class AlbaNodeHelper(CIConstants): IGNORE_KEYS = ('_error', '_duration', '_version', '_success') @classmethod - def _map_alba_nodes(cls): + def _map_alba_nodes(cls, *args, **kwargs): """ Will map the alba_node_id with its guid counterpart and return the map dict """ diff --git a/helpers/backend.py b/helpers/backend.py index 57c53a5..cd790fc 100644 --- a/helpers/backend.py +++ b/helpers/backend.py @@ -130,7 +130,7 @@ def get_albabackend_by_name(albabackend_name): raise NameError(error_msg) @classmethod - def get_asd_safety(cls, albabackend_guid, asd_id): + def get_asd_safety(cls, albabackend_guid, asd_id, *args, **kwargs): """ Request the calculation of the disk safety :param albabackend_guid: guid of the alba backend @@ -151,7 +151,7 @@ def get_asd_safety(cls, albabackend_guid, asd_id): return result[1] @classmethod - def get_backend_local_stack(cls, albabackend_name): + def get_backend_local_stack(cls, albabackend_name, *args, **kwargs): """ Fetches the local stack property of a backend diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py index 6677401..18df9ec 100644 --- a/helpers/storagerouter.py +++ b/helpers/storagerouter.py @@ 
-99,7 +99,7 @@ def get_storagerouters(): return StorageRouterList.get_storagerouters() @classmethod - def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None): + def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None, *args, **kwargs): """ :param guid: guid of the storagerouter :type guid: str diff --git a/helpers/vdisk.py b/helpers/vdisk.py index 0ace9c1..6ecee1f 100644 --- a/helpers/vdisk.py +++ b/helpers/vdisk.py @@ -109,7 +109,7 @@ def get_snapshot_by_guid(snapshot_guid, vdisk_name, vpool_name): @classmethod - def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIMEOUT): + def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIMEOUT, *args, **kwargs): """ Fetch the config parameters of a vDisk @@ -147,7 +147,7 @@ def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIM @classmethod - def scrub_vdisk(cls, vdisk_guid, timeout=15 * 60, wait=True): + def scrub_vdisk(cls, vdisk_guid, timeout=15 * 60, wait=True, *args, **kwargs): """ Scrub a specific vdisk :param vdisk_guid: guid of the vdisk to scrub diff --git a/remove/backend.py b/remove/backend.py index 9b832aa..01bcb0a 100644 --- a/remove/backend.py +++ b/remove/backend.py @@ -37,7 +37,7 @@ def remove_claimed_disk(cls): pass @classmethod - def remove_asds(cls, albabackend_name, target, disks): + def remove_asds(cls, albabackend_name, target, disks, *args, **kwargs): """ Remove all asds from a backend @@ -83,7 +83,7 @@ def remove_asds(cls, albabackend_name, target, disks): BackendRemover._remove_disk(alba_node_guid=alba_node_guid, diskname=disk_path) @classmethod - def _remove_asd(cls, alba_node_guid, asd_id, asd_safety, timeout=REMOVE_ASD_TIMEOUT): + def _remove_asd(cls, alba_node_guid, asd_id, asd_safety, timeout=REMOVE_ASD_TIMEOUT, *args, **kwargs): """ Remove a asd from a backend @@ -114,7 +114,7 @@ def _remove_asd(cls, alba_node_guid, asd_id, asd_safety, timeout=REMOVE_ASD_TIME return result[0] @classmethod - def 
_remove_disk(cls, alba_node_guid, diskname, timeout=REMOVE_DISK_TIMEOUT): + def _remove_disk(cls, alba_node_guid, diskname, timeout=REMOVE_DISK_TIMEOUT, *args, **kwargs): """ Removes a an initiliazed disk from the model @@ -142,7 +142,7 @@ def _remove_disk(cls, alba_node_guid, diskname, timeout=REMOVE_DISK_TIMEOUT): @classmethod @required_backend - def remove_backend(cls, albabackend_name, timeout=REMOVE_BACKEND_TIMEOUT): + def remove_backend(cls, albabackend_name, timeout=REMOVE_BACKEND_TIMEOUT, *args, **kwargs): """ Removes a alba backend from the ovs cluster @@ -167,7 +167,7 @@ def remove_backend(cls, albabackend_name, timeout=REMOVE_BACKEND_TIMEOUT): @classmethod @required_preset @required_backend - def remove_preset(cls, preset_name, albabackend_name, timeout=REMOVE_PRESET_TIMEOUT): + def remove_preset(cls, preset_name, albabackend_name, timeout=REMOVE_PRESET_TIMEOUT, *args, **kwargs): """ Removes a alba backend from the ovs cluster @@ -196,151 +196,7 @@ def remove_preset(cls, preset_name, albabackend_name, timeout=REMOVE_PRESET_TIME @classmethod #@required_backend - def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): - """ - Link a LOCAL backend to a GLOBAL backend - - :param globalbackend_name: name of a GLOBAL alba backend - :type globalbackend_name: str - :param albabackend_name: name of a backend to unlink - :type albabackend_name: str - :param timeout: timeout counter in seconds - :type timeout: int - :return: - """ - data = { - "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - } - - task_guid = cls.api.post( - api='/alba/backends/{0}/unlink_alba_backends' - .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), - data=data - ) - - task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) - if not task_result[0]: - error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( - albabackend_name, 
globalbackend_name, task_result[1]) - BackendRemover.LOGGER.error(error_msg) - raise RuntimeError(error_msg) - else: - BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" - .format(albabackend_name, globalbackend_name)) - return task_result[0] - - - @classmethod - #@required_backend - def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): - """ - Link a LOCAL backend to a GLOBAL backend - - :param globalbackend_name: name of a GLOBAL alba backend - :type globalbackend_name: str - :param albabackend_name: name of a backend to unlink - :type albabackend_name: str - :param timeout: timeout counter in seconds - :type timeout: int - :return: - """ - data = { - "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - } - - task_guid = cls.api.post( - api='/alba/backends/{0}/unlink_alba_backends' - .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), - data=data - ) - - task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) - if not task_result[0]: - error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( - albabackend_name, globalbackend_name, task_result[1]) - BackendRemover.LOGGER.error(error_msg) - raise RuntimeError(error_msg) - else: - BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" - .format(albabackend_name, globalbackend_name)) - return task_result[0] - - - @classmethod - #@required_backend - def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): - """ - Link a LOCAL backend to a GLOBAL backend - - :param globalbackend_name: name of a GLOBAL alba backend - :type globalbackend_name: str - :param albabackend_name: name of a backend to unlink - :type albabackend_name: str - :param timeout: timeout counter in seconds - :type timeout: int - :return: - """ - data = { - "linked_guid": 
BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - } - - task_guid = cls.api.post( - api='/alba/backends/{0}/unlink_alba_backends' - .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), - data=data - ) - - task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) - if not task_result[0]: - error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( - albabackend_name, globalbackend_name, task_result[1]) - BackendRemover.LOGGER.error(error_msg) - raise RuntimeError(error_msg) - else: - BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" - .format(albabackend_name, globalbackend_name)) - return task_result[0] - - - @classmethod - #@required_backend - def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): - """ - Link a LOCAL backend to a GLOBAL backend - - :param globalbackend_name: name of a GLOBAL alba backend - :type globalbackend_name: str - :param albabackend_name: name of a backend to unlink - :type albabackend_name: str - :param timeout: timeout counter in seconds - :type timeout: int - :return: - """ - data = { - "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name) - } - - task_guid = cls.api.post( - api='/alba/backends/{0}/unlink_alba_backends' - .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)), - data=data - ) - - task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout) - if not task_result[0]: - error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format( - albabackend_name, globalbackend_name, task_result[1]) - BackendRemover.LOGGER.error(error_msg) - raise RuntimeError(error_msg) - else: - BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded" - .format(albabackend_name, globalbackend_name)) - return task_result[0] - - - @classmethod - 
#@required_backend - def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT): + def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT, *args, **kwargs): """ Link a LOCAL backend to a GLOBAL backend diff --git a/remove/roles.py b/remove/roles.py index beba8ef..16cdb02 100644 --- a/remove/roles.py +++ b/remove/roles.py @@ -66,7 +66,7 @@ def _remove_filesystem(device, alias_part_label, client=None): raise RuntimeError('Could not remove filesystem of {0}'.format(alias_part_label)) @classmethod - def remove_role(cls, storagerouter_ip, diskname): + def remove_role(cls, storagerouter_ip, diskname, *args, **kwargs): allowed_roles = ['WRITE', 'DTL', 'SCRUB', 'DB'] cls.LOGGER.info("Starting removal of disk roles.") # Fetch information diff --git a/remove/vdisk.py b/remove/vdisk.py index d683163..0749814 100644 --- a/remove/vdisk.py +++ b/remove/vdisk.py @@ -31,7 +31,7 @@ def __init__(self): pass @classmethod - def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT): + def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs): """ Remove many vdisks at once. 
Will keep the parent structure in mind :param vdisks: list of vdisks @@ -50,7 +50,7 @@ def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT): removed_guids.append(vdisk.guid) @classmethod - def remove_snapshot(cls, snapshot_guid, vdisk_name, vpool_name, timeout=REMOVE_SNAPSHOT_TIMEOUT): + def remove_snapshot(cls, snapshot_guid, vdisk_name, vpool_name, timeout=REMOVE_SNAPSHOT_TIMEOUT, *args, **kwargs): """ Remove a existing snapshot from a existing vdisk :param vdisk_name: location of a vdisk on a vpool @@ -84,7 +84,7 @@ def remove_snapshot(cls, snapshot_guid, vdisk_name, vpool_name, timeout=REMOVE_S return True @classmethod - def remove_vdisk(cls, vdisk_guid, timeout=REMOVE_VDISK_TIMEOUT): + def remove_vdisk(cls, vdisk_guid, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs): """ Remove a vdisk from a vPool :param vdisk_guid: guid of a existing vdisk @@ -105,7 +105,7 @@ def remove_vdisk(cls, vdisk_guid, timeout=REMOVE_VDISK_TIMEOUT): return True @classmethod - def remove_vdisk_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VDISK_TIMEOUT): + def remove_vdisk_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs): """ Remove a vdisk from a vPool :param vdisk_name: name of a existing vdisk (e.g. test.raw) @@ -120,7 +120,7 @@ def remove_vdisk_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VDISK_TIMEO @classmethod @required_vtemplate - def remove_vtemplate_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VTEMPLATE_TIMEOUT): + def remove_vtemplate_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VTEMPLATE_TIMEOUT, *args, **kwargs): """ Remove a vTemplate from a cluster :param vdisk_name: name of a existing vdisk (e.g. 
test.raw) diff --git a/remove/vpool.py b/remove/vpool.py index 132bdf5..1c03ba7 100644 --- a/remove/vpool.py +++ b/remove/vpool.py @@ -26,7 +26,7 @@ class VPoolRemover(CIConstants): REMOVE_VPOOL_TIMEOUT = 500 @classmethod - def remove_vpool(cls, vpool_name, storagerouter_ip, timeout=REMOVE_VPOOL_TIMEOUT): + def remove_vpool(cls, vpool_name, storagerouter_ip, timeout=REMOVE_VPOOL_TIMEOUT, *args, **kwargs): """ Removes a existing vpool from a storagerouter :param vpool_name: the name of a existing vpool diff --git a/setup/backend.py b/setup/backend.py index 48103fc..318b633 100644 --- a/setup/backend.py +++ b/setup/backend.py @@ -41,7 +41,7 @@ def __init__(self): @classmethod @check_backend @required_roles(['DB']) - def add_backend(cls, backend_name, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES): + def add_backend(cls, backend_name, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES, *args, **kwargs): """ Add a new backend :param backend_name: Name of the Backend to add @@ -97,7 +97,7 @@ def add_backend(cls, backend_name, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max @classmethod @check_preset @required_backend - def add_preset(cls, albabackend_name, preset_details, timeout=ADD_PRESET_TIMEOUT): + def add_preset(cls, albabackend_name, preset_details, timeout=ADD_PRESET_TIMEOUT, *args, **kwargs): """ Add a new preset :param albabackend_name: albabackend name (e.g. 
'mybackend') @@ -146,7 +146,7 @@ def add_preset(cls, albabackend_name, preset_details, timeout=ADD_PRESET_TIMEOUT @classmethod @required_preset @required_backend - def update_preset(cls, albabackend_name, preset_name, policies, timeout=UPDATE_PRESET_TIMEOUT): + def update_preset(cls, albabackend_name, preset_name, policies, timeout=UPDATE_PRESET_TIMEOUT, *args, **kwargs): """ Update a existing preset :param albabackend_name: albabackend name @@ -181,7 +181,7 @@ def update_preset(cls, albabackend_name, preset_name, policies, timeout=UPDATE_P @classmethod @required_backend @filter_osds - def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRIES): + def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRIES, *args, **kwargs): """ Initialize and claim a new asds on given disks :param target: target to add asds too @@ -262,7 +262,7 @@ def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRI BackendSetup._claim_osds(alba_backend_name=albabackend_name, alba_node_guid=alba_node_guid, osds=osds_to_claim) @classmethod - def _discover_and_register_nodes(cls): + def _discover_and_register_nodes(cls, *args, **kwargs): """ Will discover and register potential nodes to the DAL/Alba """ @@ -283,7 +283,7 @@ def _discover_and_register_nodes(cls): ) @classmethod - def _map_alba_nodes(cls): + def _map_alba_nodes(cls, *args, **kwargs): """ Will map the alba_node_id with its guid counterpart and return the map dict """ @@ -302,7 +302,7 @@ def _map_alba_nodes(cls): return mapping @classmethod - def get_backend_local_stack(cls, alba_backend_name): + def get_backend_local_stack(cls, alba_backend_name, *args, **kwargs): """ Fetches the local stack property of a backend :param alba_backend_name: backend name @@ -316,7 +316,7 @@ def get_backend_local_stack(cls, alba_backend_name): ) @classmethod - def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_TIMEOUT): + def _fill_slots(cls, 
alba_node_guid, slot_information, timeout=INITIALIZE_DISK_TIMEOUT, *args, **kwargs): """ Initializes a disk to create osds :param alba_node_guid: @@ -341,7 +341,7 @@ def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_T return task_result[0] @classmethod - def _claim_osds(cls, alba_backend_name, alba_node_guid, osds, timeout=CLAIM_ASD_TIMEOUT): + def _claim_osds(cls, alba_backend_name, alba_node_guid, osds, timeout=CLAIM_ASD_TIMEOUT, *args, **kwargs): """ Claims a asd :param alba_backend_name: backend name @@ -374,7 +374,7 @@ def _claim_osds(cls, alba_backend_name, alba_node_guid, osds, timeout=CLAIM_ASD_ @required_preset @required_backend @check_linked_backend - def link_backend(cls, albabackend_name, globalbackend_name, preset_name, timeout=LINK_BACKEND_TIMEOUT): + def link_backend(cls, albabackend_name, globalbackend_name, preset_name, timeout=LINK_BACKEND_TIMEOUT, *args, **kwargs): """ Link a LOCAL backend to a GLOBAL backend diff --git a/setup/domain.py b/setup/domain.py index 059ca7a..5a43317 100644 --- a/setup/domain.py +++ b/setup/domain.py @@ -30,7 +30,7 @@ def __init__(self): pass @classmethod - def add_domain(cls, domain_name): + def add_domain(cls, domain_name, *args, **kwargs): """ Add a new (recovery) domain to the cluster @@ -57,7 +57,7 @@ def add_domain(cls, domain_name): return @classmethod - def link_domains_to_storagerouter(cls, domain_details, storagerouter_ip): + def link_domains_to_storagerouter(cls, domain_details, storagerouter_ip, *args, **kwargs): """ Link a existing domain(s) and/or recovery (domains) to a storagerouter :param domain_details: domain details of a storagerouter @@ -100,7 +100,7 @@ def link_domains_to_storagerouter(cls, domain_details, storagerouter_ip): @classmethod @required_backend - def link_domains_to_backend(cls, domain_details, albabackend_name): + def link_domains_to_backend(cls, domain_details, albabackend_name, *args, **kwargs): """ Link a existing domain(s) and/or recovery (domains) to a 
storagerouter diff --git a/setup/roles.py b/setup/roles.py index 624fa68..be13e54 100644 --- a/setup/roles.py +++ b/setup/roles.py @@ -86,7 +86,7 @@ def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION @classmethod def configure_disk(cls, storagerouter_guid, disk_guid, offset, size, roles, partition_guid=None, - timeout=CONFIGURE_DISK_TIMEOUT): + timeout=CONFIGURE_DISK_TIMEOUT, *args, **kwargs): """ Partition a disk and add roles to it diff --git a/setup/vdisk.py b/setup/vdisk.py index 3449804..b4c7030 100644 --- a/setup/vdisk.py +++ b/setup/vdisk.py @@ -37,7 +37,7 @@ def __init__(self): @classmethod def create_snapshot(cls, snapshot_name, vdisk_name, vpool_name, consistent=True, sticky=True, - timeout=CREATE_SNAPSHOT_TIMEOUT): + timeout=CREATE_SNAPSHOT_TIMEOUT, *args, **kwargs): """ Create a new snapshot for a vdisk @@ -82,7 +82,7 @@ def create_snapshot(cls, snapshot_name, vdisk_name, vpool_name, consistent=True, return task_result[1] @classmethod - def create_vdisk(cls, vdisk_name, vpool_name, size, storagerouter_ip, timeout=CREATE_VDISK_TIMEOUT): + def create_vdisk(cls, vdisk_name, vpool_name, size, storagerouter_ip, timeout=CREATE_VDISK_TIMEOUT, *args, **kwargs): """ Create a new vDisk on a certain vPool/storagerouter :param vdisk_name: location of a vdisk on a vpool (e.g. 
/mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw ) @@ -132,7 +132,7 @@ def create_vdisk(cls, vdisk_name, vpool_name, size, storagerouter_ip, timeout=CR @classmethod @required_vdisk - def move_vdisk(cls, vdisk_guid, target_storagerouter_guid, timeout=60): + def move_vdisk(cls, vdisk_guid, target_storagerouter_guid, timeout=60, *args, **kwargs): """ Moves a vdisk :param vdisk_guid: guid of the vdisk @@ -162,7 +162,7 @@ def move_vdisk(cls, vdisk_guid, target_storagerouter_guid, timeout=60): @required_vdisk @required_snapshot def create_clone(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, snapshot_id=None, - timeout=CREATE_CLONE_TIMEOUT): + timeout=CREATE_CLONE_TIMEOUT, *args, **kwargs): """ Create a new vDisk on a certain vPool/storagerouter :param vdisk_name: location of a vdisk on a vpool (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw ) @@ -224,7 +224,7 @@ def create_clone(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, @classmethod @required_vdisk - def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): + def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT, *args, **kwargs): """ Create a new vDisk on a certain vPool/storagerouter Set a existing vDisk as vTemplate @@ -255,7 +255,7 @@ def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMP @classmethod @required_vtemplate def create_from_template(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, - timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT): + timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT, *args, **kwargs): """ Create a new vDisk on a certain vPool/storagerouter Set a existing vDisk as vTemplate @@ -300,7 +300,7 @@ def create_from_template(cls, vdisk_name, vpool_name, new_vdisk_name, storagerou @classmethod @required_vdisk - def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, 
timeout=ROLLBACK_VDISK_TIMEOUT): + def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, timeout=ROLLBACK_VDISK_TIMEOUT, *args, **kwargs): """ Rollback a vdisk to a certain snapshot @@ -335,7 +335,7 @@ def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, timeout=ROLLB @classmethod @required_vdisk - def set_config_params(cls, vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT): + def set_config_params(cls, vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT, *args, **kwargs): """ Rollback a vdisk to a certain snapshot From 2959aa9b27c6b9fe33f2640881ce6997105f7275 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Wed, 3 Jan 2018 15:15:22 +0100 Subject: [PATCH 19/28] added args and kwargs in add_disk_role for setup runner fix --- setup/roles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/roles.py b/setup/roles.py index be13e54..b07ae44 100644 --- a/setup/roles.py +++ b/setup/roles.py @@ -32,7 +32,7 @@ def __init__(self): @classmethod @check_role_on_disk - def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION_SIZE): + def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION_SIZE, *args, **kwargs): """ Partition and adds roles to a disk From 55a51233151c8a3412343e4a3255a5a5aa6a6726 Mon Sep 17 00:00:00 2001 From: Jeffrey Devloo Date: Fri, 26 Jan 2018 15:41:04 +0100 Subject: [PATCH 20/28] Resolve to full path instead of local one --- helpers/tests/jsongeneratortestcase.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helpers/tests/jsongeneratortestcase.py b/helpers/tests/jsongeneratortestcase.py index 1e5abda..810ef38 100644 --- a/helpers/tests/jsongeneratortestcase.py +++ b/helpers/tests/jsongeneratortestcase.py @@ -14,7 +14,7 @@ # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
import unittest -from helpers.setupjsongenerator import SetupJsonGenerator +from ci.api_lib.helpers.setupjsongenerator import SetupJsonGenerator class JsonGeneratorTestcase(unittest.TestCase): From 16012abe671fda6c47e603628b2a8c719eef2e6a Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 6 Feb 2018 14:52:12 +0100 Subject: [PATCH 21/28] Fix more docstrings and logging --- helpers/ci_constants.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/helpers/ci_constants.py b/helpers/ci_constants.py index c435351..6bc9f4e 100644 --- a/helpers/ci_constants.py +++ b/helpers/ci_constants.py @@ -24,15 +24,11 @@ class CIConstants(object): CONFIG_LOC = "/opt/OpenvStorage/ci/config/setup.json" TEST_SCENARIO_LOC = "/opt/OpenvStorage/ci/scenarios/" - SETTINGS_LOC = "/opt/OpenvStorage/ci/config/settings.json" TESTRAIL_LOC = "/opt/OpenvStorage/ci/config/testrail.json" with open(CONFIG_LOC, 'r') as JSON_CONFIG: SETUP_CFG = json.load(JSON_CONFIG) - with open(SETTINGS_LOC, 'r') as JSON_SETTINGS: - SETTINGS = json.load(JSON_SETTINGS) - HYPERVISOR_INFO = SETUP_CFG['ci'].get('hypervisor') DOMAIN_INFO = SETUP_CFG['setup']['domains'] BACKEND_INFO = SETUP_CFG['setup']['backends'] From 95ea596aeba5f01563541340bbd7c839183d86c7 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Thu, 8 Feb 2018 10:02:22 +0100 Subject: [PATCH 22/28] Use ExtensionsToolbox verify_params --- helpers/hypervisor/apis/kvm/sdk.py | 4 ++-- helpers/hypervisor/hypervisor.py | 3 ++- helpers/setupjsongenerator.py | 24 +++++++++++++----------- setup/proxy.py | 3 ++- setup/vpool.py | 4 ++-- 5 files changed, 21 insertions(+), 17 deletions(-) diff --git a/helpers/hypervisor/apis/kvm/sdk.py b/helpers/hypervisor/apis/kvm/sdk.py index e01717b..8aad53a 100644 --- a/helpers/hypervisor/apis/kvm/sdk.py +++ b/helpers/hypervisor/apis/kvm/sdk.py @@ -25,7 +25,7 @@ from ovs.extensions.generic.logger import Logger from ovs.extensions.generic.sshclient import SSHClient from ovs.extensions.generic.system import System 
-from ovs.lib.helpers.toolbox import Toolbox +from ovs_extensions.generic.toolbox import ExtensionsToolbox from xml.etree import ElementTree from xml.etree.ElementTree import Element # Relative @@ -627,7 +627,7 @@ def create_vm(self, name, vcpus, ram, disks, cdrom_iso=None, os_type=None, os_va 'hostname': (str, None), 'username': (str, None, False), 'password': (str, None, False)} - Toolbox.verify_required_params(required_edge_params, edge_configuration) + ExtensionsToolbox.verify_required_params(required_edge_params, edge_configuration) ovs_vm = True command = ['virt-install'] options = ['--connect=qemu+ssh://{0}@{1}/system'.format(self.login, self.host), diff --git a/helpers/hypervisor/hypervisor.py b/helpers/hypervisor/hypervisor.py index 66a58b9..c0c4230 100644 --- a/helpers/hypervisor/hypervisor.py +++ b/helpers/hypervisor/hypervisor.py @@ -17,6 +17,7 @@ Using the module requires libvirt api to be available on the MACHINE THAT EXECUTES THE CODE """ from ovs_extensions.generic.filemutex import file_mutex +from ovs_extensions.generic.toolbox import ExtensionsToolbox from ovs.lib.helpers.toolbox import Toolbox from ...helpers.ci_constants import CIConstants @@ -75,7 +76,7 @@ def __init__(self, ip, user, password, type): 'user': (str, None), 'password': (str, None), 'type': (str, ['KVM', 'VMWARE'])} - Toolbox.verify_required_params(required_params, {'ip': ip, + ExtensionsToolbox.verify_required_params(required_params, {'ip': ip, 'user': user, 'password': password, 'type': type}) diff --git a/helpers/setupjsongenerator.py b/helpers/setupjsongenerator.py index 58a3ac8..8169c89 100644 --- a/helpers/setupjsongenerator.py +++ b/helpers/setupjsongenerator.py @@ -18,6 +18,7 @@ from ci.autotests import AutoTests from ovs.dal.hybrids.albabackend import AlbaBackend from ovs.dal.hybrids.diskpartition import DiskPartition +from ovs_extensions.generic.toolbox import ExtensionsToolbox from ovs.extensions.storageserver.storagedriver import StorageDriverClient from 
ovs.lib.helpers.toolbox import Toolbox @@ -103,7 +104,8 @@ def update_ci(self, ci_params): 'config_manager': 'arakoon'} all_params.update(ci_params) - Toolbox.verify_required_params(required_params=params_layout, actual_params=all_params, verify_keys=True) + + ExtensionsToolbox.verify_required_params(required_params=params_layout, actual_params=all_params, verify_keys=True) if os.system('ping -c 1 {}'.format(all_params['grid_ip'])) != 0: raise ValueError('No response from ip {0}'.format(all_params['grid_ip'])) @@ -212,7 +214,7 @@ def add_storagerouter(self, storagerouter_ip, hostname): """ self._validate_ip(storagerouter_ip) required_params = {'hostname': (str, None, True)} - Toolbox.verify_required_params(required_params=required_params, actual_params={'hostname': hostname}, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'hostname': hostname}, verify_keys=True) if 'setup' not in self.config.keys(): self.config['setup'] = {} if 'storagerouters' in self.config['setup'].keys(): @@ -246,7 +248,7 @@ def add_disk_to_sr(self, storagerouter_ip, name, roles): """ self._valid_storagerouter(storagerouter_ip) required_params = {'name': (str, None, True), 'roles': (list, None, True)} - Toolbox.verify_required_params(required_params=required_params, actual_params={'name': name, 'roles': roles}, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'name': name, 'roles': roles}, verify_keys=True) for role in roles: if role not in DiskPartition.ROLES: raise ValueError('Provided role {0} is not an allowed role for disk {1}.'.format(role, name)) @@ -279,7 +281,7 @@ def add_domain_to_sr(self, storagerouter_ip, name, recovery=False): :type recovery: bool """ self._valid_storagerouter(storagerouter_ip) - Toolbox.verify_required_params(required_params={'name': (str, None, True)}, actual_params={'name': name}, verify_keys=True) + 
ExtensionsToolbox.verify_required_params(required_params={'name': (str, None, True)}, actual_params={'name': name}, verify_keys=True) if name not in self._domains: raise ValueError('Invalid domain passed: {0}'.format(name)) @@ -323,7 +325,7 @@ def add_backend(self, backend_name, domains=None, scaling='LOCAL'): if domain_name not in self._domains: raise ValueError('Invalid domain passed: {0}'.format(domain_name)) - Toolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), + ExtensionsToolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), 'domains': (list, self._domains, True), 'scaling': (str, AlbaBackend.SCALINGS, True)}, actual_params={'backend_name': backend_name, @@ -381,7 +383,7 @@ def add_preset_to_backend(self, backend_name, preset_name, policies, compression if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3): raise ValueError('Fragment size should be a positive integer smaller than 1 GiB') - Toolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), + ExtensionsToolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True), 'preset_name': (str, Toolbox.regex_preset, True), 'policies': (list, None, True), 'fragment_size': (int, None, False)}, @@ -450,7 +452,7 @@ def add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=No 'osds_on_disk': osds_on_disks, 'linked_backend': linked_backend, 'linked_preset': linked_preset} - Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) osd_dict = {} for index, backend in enumerate(self.config['setup']['backends']): @@ -519,7 +521,7 @@ def add_vpool(self, storagerouter_ip, backend_name, preset_name, 
storage_ip, vpo 'preset_name': preset_name, 'storage_ip': storage_ip} - Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) self._valid_storagerouter(storagerouter_ip=storagerouter_ip) self._validate_ip(ip=storage_ip) if backend_name not in self._backends: @@ -580,7 +582,7 @@ def change_cache(self, storagerouter_ip, vpool, block_cache=True, fragment_cache 'fragment_cache': fragment_cache, 'on_read': on_read, 'on_write': on_write} - Toolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True) try: vpool = self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool] except KeyError: @@ -626,7 +628,7 @@ def update_storagedriver_of_vpool(self, sr_ip, vpool_name, sr_params=None): default_params.update(sr_params) if not isinstance(default_params, dict): raise ValueError('Parameters should be of type "dict"') - Toolbox.verify_required_params(required_params, default_params) + ExtensionsToolbox.verify_required_params(required_params, default_params) if sr_ip not in self.config['setup']['storagerouters'].keys(): raise KeyError('Storagerouter with ip is not defined') if vpool_name not in self.config['setup']['storagerouters'][sr_ip]['vpools']: @@ -654,7 +656,7 @@ def _valid_storagerouter(self, storagerouter_ip): def _validate_ip(self, ip): required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True)} try: - Toolbox.verify_required_params(required_params=required_params, actual_params={'storagerouter_ip': ip}, verify_keys=True) + ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'storagerouter_ip': ip}, verify_keys=True) except RuntimeError as e: raise ValueError(e) if 
os.system('ping -c 1 {0}'.format(ip)) != 0: diff --git a/setup/proxy.py b/setup/proxy.py index db40fb2..7098c61 100644 --- a/setup/proxy.py +++ b/setup/proxy.py @@ -18,6 +18,7 @@ from ovs.dal.lists.vpoollist import VPoolList from ovs.extensions.generic.configuration import Configuration from ovs.extensions.generic.logger import Logger +from ovs_extensions.generic.toolbox import ExtensionsToolbox from ovs.extensions.services.servicefactory import ServiceFactory from ovs.lib.helpers.toolbox import Toolbox from ovs.dal.hybrids.service import Service @@ -49,7 +50,7 @@ def configure_proxy(backend_name, proxy_configuration): faulty_keys = [key for key in proxy_configuration.keys() if key not in ProxySetup.PARAMS] if len(faulty_keys) > 0: raise ValueError('{0} are unsupported keys for proxy configuration.'.format(', '.join(faulty_keys))) - Toolbox.verify_required_params(ProxySetup.PARAMS, proxy_configuration) + ExtensionsToolbox.verify_required_params(ProxySetup.PARAMS, proxy_configuration) vpools = VPoolList.get_vpools() service_manager = ServiceFactory.get_manager() with open('/root/old_proxies', 'w') as backup_file: diff --git a/setup/vpool.py b/setup/vpool.py index 914ac85..a926677 100644 --- a/setup/vpool.py +++ b/setup/vpool.py @@ -15,7 +15,7 @@ # but WITHOUT ANY WARRANTY of any kind. 
from ovs.extensions.generic.logger import Logger -from ovs.lib.helpers.toolbox import Toolbox +from ovs_extensions.generic.toolbox import ExtensionsToolbox from ovs.lib.generic import GenericController from ..helpers.backend import BackendHelper from ..helpers.ci_constants import CIConstants @@ -127,7 +127,7 @@ def add_vpool(cls, vpool_name, vpool_details, storagerouter_ip, proxy_amount=2, # Settings volumedriver storagedriver_config = vpool_details.get('storagedriver') if storagedriver_config is not None: - Toolbox.verify_required_params(VPoolSetup.STORAGEDRIVER_PARAMS, storagedriver_config) + ExtensionsToolbox.verify_required_params(VPoolSetup.STORAGEDRIVER_PARAMS, storagedriver_config) VPoolSetup.LOGGER.info('Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'.format(vpool_name, storagerouter_ip)) vpool = VPoolHelper.get_vpool_by_name(vpool_name) storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == storagerouter_ip][0] From 2c90a172cff7cba546b0f7e5328cd4771fe9dec6 Mon Sep 17 00:00:00 2001 From: Jeffrey Devloo Date: Thu, 8 Feb 2018 15:09:27 +0100 Subject: [PATCH 23/28] Reflect Toolbox change --- setup/storagedriver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/storagedriver.py b/setup/storagedriver.py index 44fae1c..635948f 100644 --- a/setup/storagedriver.py +++ b/setup/storagedriver.py @@ -13,8 +13,8 @@ # # Open vStorage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY of any kind. 
-from ovs.lib.helpers.toolbox import Toolbox from ovs.extensions.generic.logger import Logger +from ovs_extensions.generic.toolbox import ExtensionsToolbox from ..helpers.storagedriver import StoragedriverHelper from ..helpers.vpool import VPoolHelper @@ -32,7 +32,7 @@ def change_config(vpool_name, vpool_details, storagerouter_ip, *args, **kwargs): # Settings volumedriver storagedriver_config = vpool_details.get('storagedriver') if storagedriver_config is not None: - Toolbox.verify_required_params(StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config) + ExtensionsToolbox.verify_required_params(StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config) StoragedriverSetup.LOGGER.info('Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'.format(vpool_name, storagerouter_ip)) vpool = VPoolHelper.get_vpool_by_name(vpool_name) storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == storagerouter_ip][0] From 3d295fd167014106021cbba30a81c4f6157a051c Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Thu, 8 Feb 2018 10:02:22 +0100 Subject: [PATCH 24/28] Expose ISCSI helpers - Remove unused system --- helpers/iscsi.py | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 helpers/iscsi.py diff --git a/helpers/iscsi.py b/helpers/iscsi.py new file mode 100644 index 0000000..1097c66 --- /dev/null +++ b/helpers/iscsi.py @@ -0,0 +1,78 @@ +# Copyright (C) 2018 iNuron NV +# +# This file is part of Open vStorage Open Source Edition (OSE), +# as available from +# +# http://www.openvstorage.org and +# http://www.openvstorage.com. +# +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3) +# as published by the Free Software Foundation, in version 3 as it comes +# in the LICENSE.txt file of the Open vStorage OSE distribution. 
+# +# Open vStorage is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY of any kind. +from ovs.dal.lists.iscsinodelist import IscsiNodeList +from ovs.lib.iscsinode import IscsiNodeController + + +class ISCSIHelper(object): + """ + Helper class for iSCSI nodes + """ + + @classmethod + def get_iscsi_nodes(cls): + """ + Get all available iSCSI nodes in the environment + :return: list containting iSCSI nodes + :rtype: DataList + """ + return IscsiNodeList.get_iscsi_nodes() + + @staticmethod + def expose_vdisk(iscsi_node_guid, vdisk_guid, username=None, password=None, acls=None): + """ + Expose a vDisk on the specified iSCSI Node + :param iscsi_node_guid: Guid of the iSCSI Node to expose the vDisk on + :type iscsi_node_guid: str + :param vdisk_guid: Guid of the vDisk to expose + :type vdisk_guid: str + :param username: User to which the Edge vDisk belongs to + :type username: str + :param password: Password linked to the user + :type password: str + :param acls: ACL information to enforce limited access to the vDisk + :type acls: list[str] + :return: IQN details + :rtype: str + """ + return IscsiNodeController.expose_vdisk(iscsi_node_guid=iscsi_node_guid, + vdisk_guid=vdisk_guid, + username=username, + password=password, + acls=acls) + + @staticmethod + def unexpose_vdisk(vdisk_guid): + """ + Un-expose a vDisk from all iSCSI Nodes its exposed on + :param vdisk_guid: Guid of the vDisk to un-expose + :type vdisk_guid: str + :return: None + :rtype: NoneType + """ + IscsiNodeController.unexpose_vdisk(vdisk_guid=vdisk_guid) + + @staticmethod + def restart_targets_for_vdisk(vdisk_guid): + """ + Restarts all targets for the vDisks + Deletes the current targets and re-creates them so the connections can be re-established + :param vdisk_guid: Guid of the vDisk to restart targets for + :type vdisk_guid: str + :return: None + :rtype: NoneType + """ + IscsiNodeController.restart_targets_for_vdisk(vdisk_guid=vdisk_guid) From 
c8f6bde3591d88f36366567495f9dba630d7871a Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Thu, 8 Feb 2018 10:02:22 +0100 Subject: [PATCH 25/28] More delegation to api_lib --- helpers/system.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/helpers/system.py b/helpers/system.py index 4f8d570..bd313d6 100644 --- a/helpers/system.py +++ b/helpers/system.py @@ -129,3 +129,12 @@ def idle_till_ovs_is_up(ip, username, password=None, connection_timeout=300, ser elif service_state == activating_state: activating_services.append(service) return {'active': active_services, 'failed': failed_service, 'activating': activating_services} + + @classmethod + def get_my_storagerouter(cls): + """ + Returns unique machine storagerouter id + :return: Storage Router this is executed on + :rtype: StorageRouter + """ + return System.get_my_storagerouter() From f4dc6f22af70b26941bb07379758b43c5a978afc Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Thu, 22 Feb 2018 16:56:56 +0100 Subject: [PATCH 26/28] Fix for integration tests relying on vm_handler that got hanging threads --- helpers/thread.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/helpers/thread.py b/helpers/thread.py index 612c2d9..6fe2a47 100644 --- a/helpers/thread.py +++ b/helpers/thread.py @@ -25,7 +25,8 @@ class ThreadHelper(object): @staticmethod def start_thread_with_event(target, name, args=(), kwargs=None): """ - Starts a thread and an event to it + Starts a thread and an event to it. 
+ The passed target function needs to accept an param 'event' which will contain the stopEvent object :param target: target - usually a method :type target: object :param name: name of the thread @@ -37,9 +38,11 @@ def start_thread_with_event(target, name, args=(), kwargs=None): """ if kwargs is None: kwargs = {} + if 'event' in kwargs: + raise ValueError('event is a reserved keyword of this function') ThreadHelper.LOGGER.info('Starting thread with target {0}'.format(target)) event = threading.Event() - args = args + (event,) + kwargs['event'] = event thread = threading.Thread(target=target, args=tuple(args), kwargs=kwargs) thread.setName(str(name)) thread.setDaemon(True) From 5fff92bc68e674599bcdb1eae499f13855b4c57f Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Thu, 22 Feb 2018 17:16:03 +0100 Subject: [PATCH 27/28] Revert "More delegation to api_lib" This reverts commit c8f6bde3591d88f36366567495f9dba630d7871a. --- helpers/system.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/helpers/system.py b/helpers/system.py index bd313d6..4f8d570 100644 --- a/helpers/system.py +++ b/helpers/system.py @@ -129,12 +129,3 @@ def idle_till_ovs_is_up(ip, username, password=None, connection_timeout=300, ser elif service_state == activating_state: activating_services.append(service) return {'active': active_services, 'failed': failed_service, 'activating': activating_services} - - @classmethod - def get_my_storagerouter(cls): - """ - Returns unique machine storagerouter id - :return: Storage Router this is executed on - :rtype: StorageRouter - """ - return System.get_my_storagerouter() From 65045d68e42137f4f3f9ed652a3d89e4336c2484 Mon Sep 17 00:00:00 2001 From: Simon Van den Bossche Date: Tue, 27 Feb 2018 16:13:45 +0100 Subject: [PATCH 28/28] Fix for the hanging thread issue: -implementation of threaded_server to offload listening to a threaded class -some cleanup of integrationtest classes Fix for issue where, after a failed cleanup of other tests, these 
vpools were eligible for integration testing. This is not intended --- helpers/ci_constants.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/helpers/ci_constants.py b/helpers/ci_constants.py index 6bc9f4e..540498c 100644 --- a/helpers/ci_constants.py +++ b/helpers/ci_constants.py @@ -43,3 +43,13 @@ def api(cls): return OVSClient(cls.SETUP_CFG['ci']['grid_ip'], cls.SETUP_CFG['ci']['user']['api']['username'], cls.SETUP_CFG['ci']['user']['api']['password']) + + @classmethod + def get_vpool_names(cls): + names = [] + for sr_ip, items in cls.STORAGEROUTER_INFO.iteritems(): + vpools = items.get('vpools') + for vp_name, vp_info in vpools.iteritems(): + if vp_name not in names: + names.append(vp_name) + return names \ No newline at end of file