diff --git a/docs/guide.md b/docs/guide.md
index 0d2c301..76fa3f5 100644
--- a/docs/guide.md
+++ b/docs/guide.md
@@ -3,7 +3,6 @@
## Description
This repository contains the automation library for Open vStorage.
-This library delegates component creation/removal to the REST API of Open vStorage through Python code.
## System requirements
@@ -26,17 +25,36 @@ This library delegates component creation/removal to the REST API of Open vStora
- Automation library HELPERS logging file `/var/log/ovs/helpers.log`
## Sections
+### Api Library
+This library delegates component creation/removal to the REST API of Open vStorage through Python code.
+
+#### Helpers section
+Contains functions to assist in removal, setup and validation of components such as backends, disks, storagerouters and -drivers, as well as gathering of metadata etc.
+
+#### Remove section
+Contains functions for removal of arakoon clusters, backends, roles, vDisks and vPools from Open vStorage.
+
+#### Setup section
+Contains functions to set up new arakoon clusters, backends, domains, proxies, roles, vDisks and vPools in Open vStorage.
-### Helpers section
-Contains helping function that provide required meta information during setup, removal or validation
+#### Validation section
+Contains functions to validate functionality of Open vStorage components.
+This includes decorators for checking prerequisites of functions throughout the package.
-### Remove section
-Contains removal functions that makes it possible to remove components from Open vStorage
+### Scenario helpers section
+Classes in this section are used to execute the actual tests (referred to as [scenarios](#header_scenarios))
-### Setup section
-Contains setup functions that makes it possible to add components to Open vStorage
+### Scenarios section
+This section contains code for testing a variety of integration scenarios.\
+Currently present tests:
+- api checkup post-reboot
+- several arakoon related checks
+- addition and removal of
+ - backends
+ - storagerouters
+ - vDisks, vMachines and vPools
+- health checks
+- installation tests
+- hypervisor tests
-### Validation section
-Provides validation for setup or removal of Open vStorage components.
-E.g. when a vPool is added, the required components are checked if they are present
diff --git a/helpers/albanode.py b/helpers/albanode.py
index cfd0472..9e5dd94 100644
--- a/helpers/albanode.py
+++ b/helpers/albanode.py
@@ -13,12 +13,14 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
+
from ovs.dal.hybrids.albanode import AlbaNode
from ovs.dal.lists.albanodelist import AlbaNodeList
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
-class AlbaNodeHelper(object):
+class AlbaNodeHelper(CIConstants):
"""
Alba node helper class
"""
@@ -26,19 +28,17 @@ class AlbaNodeHelper(object):
LOGGER = Logger('helpers-ci_albanode')
IGNORE_KEYS = ('_error', '_duration', '_version', '_success')
- @staticmethod
- def _map_alba_nodes(api):
+ @classmethod
+ def _map_alba_nodes(cls, *args, **kwargs):
"""
Will map the alba_node_id with its guid counterpart and return the map dict
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
"""
- mapping = {}
+ mapping = {}
options = {
'contents': 'node_id,_relations',
}
- response = api.get(
+ response = cls.api.get(
api='alba/nodes',
params=options
)
diff --git a/helpers/api.py b/helpers/api.py
index 59e5b69..a57eb0a 100644
--- a/helpers/api.py
+++ b/helpers/api.py
@@ -193,17 +193,17 @@ def _call(self, api, params, func, **kwargs):
if self._volatile_client is not None:
self._token = self._volatile_client.get(self._key)
first_connect = self._token is None
- headers, url = self._prepare(params=params)
+ headers, _url = self._prepare(params=params)
try:
- return self._process(func(url=url.format(api), headers=headers, verify=self._verify, **kwargs))
+ return self._process(func(url=_url.format(api), headers=headers, verify=self._verify, **kwargs))
except ForbiddenException:
if self._volatile_client is not None:
self._volatile_client.delete(self._key)
if first_connect is True: # First connect, so no token was present yet, so no need to try twice without token
raise
self._token = None
- headers, url = self._prepare(params=params)
- return self._process(func(url=url.format(api), headers=headers, verify=self._verify, **kwargs))
+ headers, _url = self._prepare(params=params)
+ return self._process(func(url=_url.format(api), headers=headers, verify=self._verify, **kwargs))
except Exception:
if self._volatile_client is not None:
self._volatile_client.delete(self._key)
@@ -264,7 +264,12 @@ def wait_for_task(self, task_id, timeout=None):
if timeout is not None and timeout < (time.time() - start):
raise TimeOutError('Waiting for task {0} has timed out.'.format(task_id))
task_metadata = self.get('/tasks/{0}/'.format(task_id))
- print task_metadata
+ output = 'Task with ID: {0: >40}, current status: {1: >8}, ready: {2: >2}. Result data: {3}'.format(task_metadata['id'],
+ task_metadata['status'],
+ task_metadata['successful'],
+ task_metadata['result'])
+ print output
+ OVSClient._logger.debug(output)
finished = task_metadata['status'] in ('FAILURE', 'SUCCESS')
if finished is False:
if task_metadata != previous_metadata:
@@ -286,8 +291,6 @@ def _to_json(dict_or_json):
:return: json data
:rtype: string
"""
- try:
- json_object = json.loads(str(dict_or_json))
- except ValueError:
+ if isinstance(dict_or_json, dict):
return json.dumps(dict_or_json)
- return json_object
+ return dict_or_json
diff --git a/helpers/backend.py b/helpers/backend.py
index 2ac8e43..cd790fc 100644
--- a/helpers/backend.py
+++ b/helpers/backend.py
@@ -13,15 +13,17 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
+
+from ovs.dal.hybrids.albabackend import AlbaBackend
from ovs.dal.lists.albabackendlist import AlbaBackendList
from ovs.dal.lists.backendlist import BackendList
from ovs.dal.lists.backendtypelist import BackendTypeList
-from ovs.dal.hybrids.albabackend import AlbaBackend
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.exceptions import PresetNotFoundError, AlbaBackendNotFoundError
-class BackendHelper(object):
+class BackendHelper(CIConstants):
"""
BackendHelper class
"""
@@ -127,22 +129,20 @@ def get_albabackend_by_name(albabackend_name):
BackendHelper.LOGGER.error(error_msg)
raise NameError(error_msg)
- @staticmethod
- def get_asd_safety(albabackend_guid, asd_id, api):
+ @classmethod
+ def get_asd_safety(cls, albabackend_guid, asd_id, *args, **kwargs):
"""
Request the calculation of the disk safety
:param albabackend_guid: guid of the alba backend
:type albabackend_guid: str
:param asd_id: id of the asd
:type asd_id: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:return: asd safety
:rtype: dict
"""
params = {'asd_id': asd_id}
- task_guid = api.get('alba/backends/{0}/calculate_safety'.format(albabackend_guid), params=params)
- result = api.wait_for_task(task_id=task_guid, timeout=30)
+ task_guid = cls.api.get('alba/backends/{0}/calculate_safety'.format(albabackend_guid), params=params)
+ result = cls.api.wait_for_task(task_id=task_guid, timeout=30)
if result[0] is False:
errormsg = "Calculate safety for '{0}' failed with '{1}'".format(asd_id, result[1])
@@ -150,22 +150,19 @@ def get_asd_safety(albabackend_guid, asd_id, api):
raise RuntimeError(errormsg)
return result[1]
- @staticmethod
- def get_backend_local_stack(albabackend_name, api):
+ @classmethod
+ def get_backend_local_stack(cls, albabackend_name, *args, **kwargs):
"""
Fetches the local stack property of a backend
:param albabackend_name: backend name
:type albabackend_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
"""
options = {
'contents': 'local_stack',
}
- return api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)),
- params={'queryparams': options}
- )
+ return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)),
+ params={'queryparams': options})
@staticmethod
def get_alba_backends():
diff --git a/helpers/ci_constants.py b/helpers/ci_constants.py
new file mode 100644
index 0000000..540498c
--- /dev/null
+++ b/helpers/ci_constants.py
@@ -0,0 +1,55 @@
+# Copyright (C) 2016 iNuron NV
+#
+# This file is part of Open vStorage Open Source Edition (OSE),
+# as available from
+#
+# http://www.openvstorage.org and
+# http://www.openvstorage.com.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
+# as published by the Free Software Foundation, in version 3 as it comes
+# in the LICENSE.txt file of the Open vStorage OSE distribution.
+#
+# Open vStorage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY of any kind.
+import json
+from ci.api_lib.helpers.api import OVSClient
+
+
+class CIConstants(object):
+ """
+ Collection of multiple constants and constant related instances
+ """
+
+ CONFIG_LOC = "/opt/OpenvStorage/ci/config/setup.json"
+ TEST_SCENARIO_LOC = "/opt/OpenvStorage/ci/scenarios/"
+ TESTRAIL_LOC = "/opt/OpenvStorage/ci/config/testrail.json"
+
+ with open(CONFIG_LOC, 'r') as JSON_CONFIG:
+ SETUP_CFG = json.load(JSON_CONFIG)
+
+ HYPERVISOR_INFO = SETUP_CFG['ci'].get('hypervisor')
+ DOMAIN_INFO = SETUP_CFG['setup']['domains']
+ BACKEND_INFO = SETUP_CFG['setup']['backends']
+ STORAGEROUTER_INFO = SETUP_CFG['setup']['storagerouters']
+
+ class classproperty(property):
+ def __get__(self, cls, owner):
+ return classmethod(self.fget).__get__(None, owner)()
+
+ @classproperty
+ def api(cls):
+ return OVSClient(cls.SETUP_CFG['ci']['grid_ip'],
+ cls.SETUP_CFG['ci']['user']['api']['username'],
+ cls.SETUP_CFG['ci']['user']['api']['password'])
+
+ @classmethod
+ def get_vpool_names(cls):
+ names = []
+ for sr_ip, items in cls.STORAGEROUTER_INFO.iteritems():
+ vpools = items.get('vpools')
+ for vp_name, vp_info in vpools.iteritems():
+ if vp_name not in names:
+ names.append(vp_name)
+ return names
\ No newline at end of file
diff --git a/helpers/disk.py b/helpers/disk.py
index d6db106..d9d18e2 100644
--- a/helpers/disk.py
+++ b/helpers/disk.py
@@ -24,14 +24,10 @@ class DiskHelper(object):
DiskHelper class
"""
- def __init__(self):
- pass
-
@staticmethod
def get_diskpartitions_by_guid(diskguid):
"""
Fetch disk partitions by disk guid
-
:param diskguid: ip address of a storagerouter
:type diskguid: str
:return: list of DiskPartition Objects
@@ -41,54 +37,48 @@ def get_diskpartitions_by_guid(diskguid):
return [dp for dp in DiskPartitionList.get_partitions() if dp.disk_guid == diskguid]
@staticmethod
- def get_roles_from_disks(storagerouter_ip=None):
+ def get_roles_from_disks(storagerouter_guid=None):
"""
Fetch disk roles from all disks with optional storagerouter_ip
-
- :param storagerouter_ip: ip address of a storage router
- :type storagerouter_ip: str
+ :param storagerouter_guid: guid of a storage router
+ :type storagerouter_guid: str
:return: list of lists with roles
:rtype: list > list
"""
- if not storagerouter_ip:
+ if not storagerouter_guid:
return [partition.roles for disk in DiskList.get_disks() for partition in disk.partitions]
else:
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)
return [partition.roles for disk in DiskList.get_disks()
if disk.storagerouter_guid == storagerouter_guid for partition in disk.partitions]
@staticmethod
- def get_disk_by_diskname(storagerouter_ip, disk_name):
+ def get_disk_by_diskname(storagerouter_guid, disk_name):
"""
- Get a disk object by storagerouter ip and disk name
-
- :param storagerouter_ip: ip address of a storage router
- :type storagerouter_ip: str
+ Get a disk object by storagerouter guid and disk name
+ :param storagerouter_guid: guid address of a storage router
+ :type storagerouter_guid: str
:param disk_name: name of a disk (e.g. sda)
:type disk_name: str
:return: disk object
:rtype: ovs.dal.hybrids.Disk
"""
-
- storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip=storagerouter_ip)
+ storagerouter = StoragerouterHelper.get_storagerouter_by_guid(storagerouter_guid=storagerouter_guid)
for disk in storagerouter.disks:
if disk.name == disk_name:
return disk
@staticmethod
- def get_roles_from_disk(storagerouter_ip, disk_name):
+ def get_roles_from_disk(storagerouter_guid, disk_name):
"""
Get the roles from a certain disk
-
- :param storagerouter_ip: ip address of a storage router
- :type storagerouter_ip: str
+ :param storagerouter_guid: guid address of a storage router
+ :type storagerouter_guid: str
:param disk_name: name of a disk (e.g. sda)
:type disk_name: str
:return: list of roles of all partitions on a certain disk
:rtype: list
"""
-
- disk = DiskHelper.get_disk_by_diskname(storagerouter_ip, disk_name)
+ disk = DiskHelper.get_disk_by_diskname(storagerouter_guid, disk_name)
roles_on_disk = []
if disk:
for diskpartition in disk.partitions:
@@ -96,4 +86,4 @@ def get_roles_from_disk(storagerouter_ip, disk_name):
roles_on_disk.append(role)
return roles_on_disk
else:
- raise RuntimeError("Disk with name `{0}` not found on storagerouter `{1}`".format(disk_name, storagerouter_ip))
+ raise RuntimeError("Disk with name `{0}` not found on storagerouter `{1}`".format(disk_name, storagerouter_guid))
diff --git a/helpers/fstab.py b/helpers/fstab.py
index 8e6765a..10b99ba 100644
--- a/helpers/fstab.py
+++ b/helpers/fstab.py
@@ -13,15 +13,19 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
+import inspect
+from ovs.extensions.generic.sshclient import SSHClient
+from ovs.extensions.generic.system import System
-class FstabHelper(file):
+class FstabHelper(object):
"""
Class to help with Fstab manipulations
Inherits from file class
"""
import os
+
class Entry(object):
"""
Entry class represents a non-comment line on the `/etc/fstab` file
@@ -42,17 +46,50 @@ def __init__(self, device, mountpoint, filesystem, options, d=0, p=0):
def __eq__(self, o):
return str(self) == str(o)
+ def __ne__(self, o):
+ return str(self) != str(o)
+
def __str__(self):
return "{} {} {} {} {} {}".format(self.device, self.mountpoint, self.filesystem, self.options, self.d, self.p)
+ def get(self, item):
+ if not isinstance(item,basestring):
+ raise ValueError('Specified parameter {0} must be a string')
+ item = item.lower()
+ if item in self.__dict__.keys():
+ if item == 'device':
+ return self.device
+ elif item == 'mountpoint':
+ return self.mountpoint
+ elif item == 'options':
+ return self.options
+ elif item == 'd':
+ return self.d
+ elif item == 'p':
+ return self.p
+ else:
+ return None
+ else:
+ raise ValueError('Specified parameter {0} not an attribute of Entry class.'.format(item))
+
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
- def __init__(self, path=None):
+ _path = DEFAULT_PATH
+
+ def __init__(self, path=None, client=None):
+ """
+
+ :param path: path of the fstab file
+ :type path: str
+ """
if path:
self._path = path
else:
self._path = self.DEFAULT_PATH
- file.__init__(self._path, 'r+')
+ if client is None:
+ client = SSHClient(System.get_my_storagerouter(), username='root')
+ self.client = client
+
@staticmethod
def _hydrate_entry(line):
@@ -62,86 +99,48 @@ def _hydrate_entry(line):
:type line: str
:return:
"""
- return FstabHelper.Entry(*filter(lambda x: x not in ('', None), line.strip("\n").split(" ")))
-
- @property
- def entries(self):
- """
- Property containing all non-comment entries
- :return:
- """
- self.seek(0)
- for line in self.readlines():
- try:
- if not line.startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
+ return FstabHelper.Entry(*filter(lambda x: x not in ('',' ', None), str(line).strip("\n").split(" ")))
def get_entry_by_attr(self, attr, value):
"""
- Returns an entry with where a attr has a specific value
+ Returns an entry with where an attr has a specific value
:param attr: attribute from the entry
:param value: value that the attribute should have
:return:
"""
- for entry in self.entries:
- e_attr = getattr(entry, attr)
+ entries = []
+ for line in self.client.file_read(self._path).strip().splitlines():
+ try:
+ if not line.startswith("#") and line.strip() is not '':
+ entries.append(self._hydrate_entry(line))
+ except ValueError:
+ pass
+ for entry in entries:
+ e_attr = entry.get(attr)
if e_attr == value:
return entry
return None
- def add_entry(self, entry):
- """
- Adds an entry in fstab
- :param entry: entry object to add to fstab
- :return:
- """
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write(str(entry) + '\n')
- self.truncate()
- return entry
-
def remove_entry(self, entry):
"""
Removes a line from fstab
:param entry:entry object
:return:
"""
- self.seek(0)
-
- lines = self.readlines()
-
- found = False
- line = None
- for index, line in enumerate(lines):
- if not line.startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
+ lines = self.client.file_read(self._path).strip().splitlines()
+ lines = [line for line in lines if not line.startswith('#') and self._hydrate_entry(line) != entry]
+ self.client.file_write(self._path, '\n'.join(lines))
- if not found:
- return False
- if line is not None:
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines))
- self.truncate()
- return True
-
- def remove_by_mountpoint(self, mountpoint):
+ def remove_by_mountpoint(self, mountpoint, client=None):
"""
Removes an entry by specific mountpoint
:param mountpoint: mountpoint
:return:
"""
+
entry = self.get_entry_by_attr('mountpoint', mountpoint)
if entry:
- return self.remove_entry(entry)
- return False
+ self.remove_entry(entry)
def add(self, device, mountpoint, filesystem, options=None, dump=None, pass_=None):
"""
@@ -154,4 +153,6 @@ def add(self, device, mountpoint, filesystem, options=None, dump=None, pass_=Non
:param pass_: order to check filesystem at reboot time
:return:
"""
- return self.add_entry(FstabHelper.Entry(device, mountpoint, filesystem, options, dump))
+ lines = self.client.file_read(self._path).strip().splitlines()
+ lines.append(str(FstabHelper.Entry(device, mountpoint, filesystem, options, dump)))
+ self.client.file_write(self._path, '\n'.join(lines))
diff --git a/helpers/hypervisor/apis/kvm/option_mapping.py b/helpers/hypervisor/apis/kvm/option_mapping.py
index 7f0eaad..5a4784e 100644
--- a/helpers/hypervisor/apis/kvm/option_mapping.py
+++ b/helpers/hypervisor/apis/kvm/option_mapping.py
@@ -104,7 +104,7 @@ class SdkOptionMapping(object):
"mac": {
"option": "mac",
"values": None,
- "default": "random",
+ "default": "RANDOM",
"type": str
},
}
diff --git a/helpers/hypervisor/apis/kvm/sdk.py b/helpers/hypervisor/apis/kvm/sdk.py
index 753754f..8aad53a 100644
--- a/helpers/hypervisor/apis/kvm/sdk.py
+++ b/helpers/hypervisor/apis/kvm/sdk.py
@@ -25,7 +25,7 @@
from ovs.extensions.generic.logger import Logger
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.system import System
-from ovs.lib.helpers.toolbox import Toolbox
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
# Relative
@@ -627,7 +627,7 @@ def create_vm(self, name, vcpus, ram, disks, cdrom_iso=None, os_type=None, os_va
'hostname': (str, None),
'username': (str, None, False),
'password': (str, None, False)}
- Toolbox.verify_required_params(required_edge_params, edge_configuration)
+ ExtensionsToolbox.verify_required_params(required_edge_params, edge_configuration)
ovs_vm = True
command = ['virt-install']
options = ['--connect=qemu+ssh://{0}@{1}/system'.format(self.login, self.host),
@@ -674,6 +674,160 @@ def create_vm(self, name, vcpus, ram, disks, cdrom_iso=None, os_type=None, os_va
print ' '.join(command+options)
raise RuntimeError(msg)
+ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size,
+ mountpoint, cloud_init_url, cloud_init_name, root_password, force=False):
+ """
+ Create vm from cloud init
+ :param name: Name of the vm
+ :param vcpus: amount of vcpus
+ :param ram: amount of ram (MB)
+ :param boot_disk_size: size of the boot disks (notation xGB)
+ :param bridge: network bridge name
+ :param ip: ip of the vm
+ :param netmask: netmask
+ :param gateway: gateway
+ :param nameserver: dns ip
+ :param amount_disks: amount of extra disks
+ :param size: size of the extra disks (notation xGB)
+ :param mountpoint: where the extra disks should be created
+ :param cloud_init_url: cloud init url
+ :param cloud_init_name: vmdk template name
+ :param root_password: root password of the vm
+ :param force: remove vm with the same name or used disks
+ :return:
+ """
+
+ template_directory = '/var/lib/libvirt/images'
+ vmdk_file = "{0}/{1}.vmdk".format(template_directory, cloud_init_name)
+ qcow_file = "{0}/{1}.qcow2".format(template_directory, cloud_init_name)
+ # Check if cloud_init already exists if not download vmdk
+ if not self.ssh_client.file_exists(vmdk_file):
+ self.ssh_client.run(["wget", "-O", vmdk_file, cloud_init_url])
+
+ if not self.ssh_client.file_exists(qcow_file):
+ self.ssh_client.run(["qemu-img", "convert", "-O", "qcow2", vmdk_file, qcow_file])
+
+ vm_directory = "{0}/{1}".format(template_directory, name)
+ user_data = "{0}/user-data".format(vm_directory)
+ meta_data = "{0}/meta-data".format(vm_directory)
+ ci_iso = "{0}/{1}.iso".format(vm_directory, name)
+ boot_disk = "{0}/{1}.qcow2".format(vm_directory, name)
+
+ meta_data_lines = [
+ 'instance-id: {0}'.format(uuid.uuid1()),
+ 'local-hostname: {0}'.format(name),
+ 'network-interfaces: |',
+ ' auto ens3',
+ ' iface ens3 inet static',
+ ' address {0}'.format(ip),
+ ' netmask {0}'.format(netmask),
+ ' gateway {0}'.format(gateway),
+ 'manage_resolve_conf: True',
+ 'resolv_conf:',
+ ' nameservers:[{0}]'.format(nameserver),
+ ''
+ ]
+
+ user_data_lines = [
+ '#cloud-config',
+ 'hostname: {0}'.format(name),
+ 'manage_etc_hosts: True',
+ 'disable_root: False',
+ 'password: {0}'.format(root_password),
+ 'ssh_pwauth: True',
+ 'chpasswd:',
+ ' list: |',
+ ' root:{0}'.format(root_password),
+ ' ubuntu:{0}'.format(root_password),
+ ' expire: False',
+ 'runcmd:',
+ ' - [sed, -ie, "s/PermitRootLogin prohibit-password/PermitRootLogin yes/", /etc/ssh/sshd_config]',
+ ' - [sed, -ie, "s/PasswordAuthentication no/PasswordAuthentication yes/", /etc/ssh/sshd_config]',
+ ' - [service, ssh, restart]',
+ ''
+ ]
+
+ # Check if vm already exists with this name
+ vm = None
+
+ try:
+ vm = self._conn.lookupByName(name)
+ except libvirt.libvirtError:
+ pass
+
+ if vm and force:
+ self.delete_vm(vm, True)
+ elif vm and not force:
+ raise Exception('VM {0} is still defined on this hypervisor. Use the force=True option to delete.'.format(name))
+
+ if self.ssh_client.dir_exists(vm_directory):
+ exists, used_disk, vm_name = self._check_disks_in_use([ci_iso, boot_disk])
+ if exists:
+ raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, vm_name))
+
+ self.ssh_client.dir_delete(vm_directory)
+
+ self.ssh_client.dir_create(vm_directory)
+ # Copy template image
+ self.ssh_client.run(["cp", qcow_file, boot_disk])
+
+ # Resize image
+ self.ssh_client.run(["qemu-img", "resize", boot_disk, boot_disk_size])
+
+ # Create metadata and user data file
+ self.ssh_client.file_write(meta_data, '\n'.join(meta_data_lines))
+
+ self.ssh_client.file_write(user_data, '\n'.join(user_data_lines))
+
+ # Generate iso for cloud-init
+ self.ssh_client.run(["genisoimage", "-output", ci_iso, "-volid", "cidata", "-joliet", "-r", user_data, meta_data])
+
+ # Create extra disks
+ all_disks = [{'mountpoint': boot_disk, "format": "qcow2", "bus": "virtio"}]
+
+ if amount_disks > 0 and size > 0:
+ if not self.ssh_client.dir_exists(mountpoint):
+ raise Exception("Directory {0} doesn't exists.".format(mountpoint))
+
+ for i in xrange(1, amount_disks+1):
+ disk_path = "{0}/{1}_{2:02d}.qcow2".format(mountpoint, name, i,)
+ exists, used_disk, vm_name = self._check_disks_in_use([disk_path])
+ disk_exists_filesystem = self.ssh_client.file_exists(disk_path)
+ if disk_exists_filesystem and exists:
+ raise Exception("Virtual Disk {0} in used by {1}".format(used_disk, vm_name))
+ elif disk_exists_filesystem:
+ self.ssh_client.file_delete(disk_path)
+
+ self.ssh_client.run(['qemu-img', 'create', '-f', 'qcow2', disk_path, size])
+ all_disks.append({'mountpoint': disk_path, "format": "qcow2", "bus": "virtio"})
+
+ self.create_vm(name=name, vcpus=vcpus, ram=ram, disks=all_disks, cdrom_iso=ci_iso,
+ networks=[{"bridge": bridge, "model": "virtio"}], start=True)
+
+ def _check_disks_in_use(self, disk_paths):
+ """
+ Check if disks are in used
+ :param disks: list of disk paths
+ :type disks: list
+ :return: bool
+ """
+ for dom in self.get_vms():
+ dom_info = ElementTree.fromstring(dom.XMLDesc(0))
+ disks = dom_info.findall('.//disk')
+ for disk in disks:
+ if disk.find('source') is None:
+ continue
+ used_disk = disk.find('source').get('file')
+ if used_disk in disk_paths:
+ try:
+ return True, used_disk, dom_info.find('name').text
+ except AttributeError as ex:
+ msg = "Error during checking of VM's disks. Got {0}".format(str(ex))
+ logger.exception(msg)
+ return True, used_disk, 'Unknown vm name'
+
+ return False, '', ''
+
@staticmethod
def _update_xml_for_ovs(xml, edge_configuration):
"""
diff --git a/helpers/hypervisor/hypervisor.py b/helpers/hypervisor/hypervisor.py
index 4b88185..c0c4230 100644
--- a/helpers/hypervisor/hypervisor.py
+++ b/helpers/hypervisor/hypervisor.py
@@ -16,39 +16,74 @@
Hypervisor/ManagementCenter factory module
Using the module requires libvirt api to be available on the MACHINE THAT EXECUTES THE CODE
"""
-
from ovs_extensions.generic.filemutex import file_mutex
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
+from ovs.lib.helpers.toolbox import Toolbox
+from ...helpers.ci_constants import CIConstants
-class HypervisorFactory(object):
+class HypervisorFactory(CIConstants):
"""
HypervisorFactory class provides functionality to get abstracted hypervisor
"""
-
hypervisors = {}
- @staticmethod
- def get(ip, username, password, hvtype):
+ @classmethod
+ def get(cls, hv_credentials=None):
"""
Returns the appropriate hypervisor client class for a given PMachine
+ :param hv_credentials: object that contains ip, user, password and hypervisor type
+ :type hv_credentials: HypervisorCredentials object
"""
- key = '{0}_{1}'.format(ip, username)
- if key not in HypervisorFactory.hypervisors:
- mutex = file_mutex('hypervisor_{0}'.format(key))
- try:
- mutex.acquire(30)
- if key not in HypervisorFactory.hypervisors:
- if hvtype == 'VMWARE':
- # Not yet tested. Needs to be rewritten
- raise NotImplementedError("{0} has not yet been implemented".format(hvtype))
- from .hypervisors.vmware import VMware
- hypervisor = VMware(ip, username, password)
- elif hvtype == 'KVM':
- from .hypervisors.kvm import KVM
- hypervisor = KVM(ip, username, password)
- else:
- raise NotImplementedError('Hypervisor {0} is not yet supported'.format(hvtype))
- HypervisorFactory.hypervisors[key] = hypervisor
- finally:
- mutex.release()
- return HypervisorFactory.hypervisors[key]
+ if hv_credentials is None:
+ return cls.get(HypervisorCredentials(ip=CIConstants.HYPERVISOR_INFO['ip'],
+ user=CIConstants.HYPERVISOR_INFO['user'],
+ password=CIConstants.HYPERVISOR_INFO['password'],
+ type=CIConstants.HYPERVISOR_INFO['type']))
+ if not isinstance(hv_credentials, HypervisorCredentials):
+ raise TypeError('Credentials must be of type HypervisorCredentials')
+ return cls.hypervisors.get(hv_credentials, cls._add_hypervisor(hv_credentials))
+
+ @staticmethod
+ def _add_hypervisor(hypervisor_credentials):
+ ip = hypervisor_credentials.ip
+ username = hypervisor_credentials.user
+ password = hypervisor_credentials.password
+ hvtype = hypervisor_credentials.type
+ mutex = file_mutex('hypervisor_{0}'.format(hash(hypervisor_credentials)))
+ try:
+ mutex.acquire(30)
+ if hypervisor_credentials not in HypervisorFactory.hypervisors:
+ if hvtype == 'VMWARE':
+ # Not yet tested. Needs to be rewritten
+ raise NotImplementedError("{0} has not yet been implemented".format(hvtype))
+ from .hypervisors.vmware import VMware
+ hypervisor = VMware(ip, username, password)
+ elif hvtype == 'KVM':
+ from .hypervisors.kvm import KVM
+ hypervisor = KVM(ip, username, password)
+ else:
+ raise NotImplementedError('Hypervisor {0} is not yet supported'.format(hvtype))
+ HypervisorFactory.hypervisors[hypervisor_credentials] = hypervisor
+ return hypervisor
+ finally:
+ mutex.release()
+
+
+class HypervisorCredentials(object):
+ def __init__(self, ip, user, password, type):
+ required_params = {'ip': (str, Toolbox.regex_ip),
+ 'user': (str, None),
+ 'password': (str, None),
+ 'type': (str, ['KVM', 'VMWARE'])}
+ ExtensionsToolbox.verify_required_params(required_params, {'ip': ip,
+ 'user': user,
+ 'password': password,
+ 'type': type})
+ self.ip = ip
+ self.user = user
+ self.password = password
+ self.type = type
+
+ def __str__(self):
+ return 'hypervisor at ip {0} of type {1}'.format(self.ip, self.type)
diff --git a/helpers/hypervisor/hypervisors/kvm.py b/helpers/hypervisor/hypervisors/kvm.py
index 899fb84..6a43231 100644
--- a/helpers/hypervisor/hypervisors/kvm.py
+++ b/helpers/hypervisor/hypervisors/kvm.py
@@ -46,6 +46,32 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T
_ = ip, wait # For compatibility purposes only
return self.sdk.create_vm_from_template(name, source_vm, disks, mountpoint)
+ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size,
+ mountpoint, cloud_init_url, cloud_init_name, root_password, force=False):
+ """
+ Create vm from cloud init
+ :param name: Name of the vm
+ :param vcpus: amount of vcpus
+ :param ram: amount of ram (MB)
+ :param boot_disk_size: size of the boot disks (notation xGB)
+ :param bridge: network bridge name
+ :param ip: ip of the vm
+ :param netmask: netmask
+ :param gateway: gateway
+ :param nameserver: dns ip
+ :param amount_disks: amount of extra disks
+ :param size: size of the extra disks (notation xGB)
+ :param mountpoint: where the extra disks should be created
+ :param cloud_init_url: cloud init url
+ :param cloud_init_name: vmdk template name
+ :param root_password: root password of the vm
+ :param force: remove vm with the same name or used disks
+ :return:
+ """
+ return self.sdk.create_vm_from_cloud_init(name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver,
+ amount_disks, size, mountpoint, cloud_init_url, cloud_init_name,
+ root_password, force)
+
def delete_vm(self, vmid, storagedriver_mountpoint=None, storagedriver_storage_ip=None, devicename=None, disks_info=None, wait=True):
"""
Deletes a given VM and its disks
diff --git a/helpers/hypervisor/hypervisors/vmware.py b/helpers/hypervisor/hypervisors/vmware.py
index 55b39da..79a22f2 100644
--- a/helpers/hypervisor/hypervisors/vmware.py
+++ b/helpers/hypervisor/hypervisors/vmware.py
@@ -51,6 +51,30 @@ def create_vm_from_template(self, name, source_vm, disks, ip, mountpoint, wait=T
return task_info.info.result.value
return None
+ def create_vm_from_cloud_init(self, name, vcpus, ram, boot_disk_size, bridge, ip, netmask, gateway, nameserver, amount_disks, size,
+ mountpoint, cloud_init_url, cloud_init_name, root_password, force=False):
+ """
+ Create vm from cloud init
+ :param name: Name of the vm
+ :param vcpus: amount of vcpus
+ :param ram: amount of ram (MB)
+ :param boot_disk_size: size of the boot disks (notation xGB)
+ :param bridge: network bridge name
+ :param ip: ip of the vm
+ :param netmask: netmask
+ :param gateway: gateway
+ :param nameserver: dns ip
+ :param amount_disks: amount of extra disks
+ :param size: size of the extra disks (notation xGB)
+ :param mountpoint: where the extra disks should be created
+ :param cloud_init_url: cloud init url
+ :param cloud_init_name: vmdk template name
+ :param root_password: root password of the vm
+ :param force: remove vm with the same name or used disks
+ :return:
+ """
+ raise NotImplementedError
+
def clone_vm(self, vmid, name, disks, mountpoint, wait=False):
"""
Clone a vmachine
diff --git a/helpers/iscsi.py b/helpers/iscsi.py
new file mode 100644
index 0000000..1097c66
--- /dev/null
+++ b/helpers/iscsi.py
@@ -0,0 +1,78 @@
+# Copyright (C) 2018 iNuron NV
+#
+# This file is part of Open vStorage Open Source Edition (OSE),
+# as available from
+#
+# http://www.openvstorage.org and
+# http://www.openvstorage.com.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
+# as published by the Free Software Foundation, in version 3 as it comes
+# in the LICENSE.txt file of the Open vStorage OSE distribution.
+#
+# Open vStorage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY of any kind.
+from ovs.dal.lists.iscsinodelist import IscsiNodeList
+from ovs.lib.iscsinode import IscsiNodeController
+
+
+class ISCSIHelper(object):
+ """
+ Helper class for iSCSI nodes
+ """
+
+ @classmethod
+ def get_iscsi_nodes(cls):
+ """
+ Get all available iSCSI nodes in the environment
+ :return: list containing iSCSI nodes
+ :rtype: DataList
+ """
+ return IscsiNodeList.get_iscsi_nodes()
+
+ @staticmethod
+ def expose_vdisk(iscsi_node_guid, vdisk_guid, username=None, password=None, acls=None):
+ """
+ Expose a vDisk on the specified iSCSI Node
+ :param iscsi_node_guid: Guid of the iSCSI Node to expose the vDisk on
+ :type iscsi_node_guid: str
+ :param vdisk_guid: Guid of the vDisk to expose
+ :type vdisk_guid: str
+ :param username: User to which the Edge vDisk belongs to
+ :type username: str
+ :param password: Password linked to the user
+ :type password: str
+ :param acls: ACL information to enforce limited access to the vDisk
+ :type acls: list[str]
+ :return: IQN details
+ :rtype: str
+ """
+ return IscsiNodeController.expose_vdisk(iscsi_node_guid=iscsi_node_guid,
+ vdisk_guid=vdisk_guid,
+ username=username,
+ password=password,
+ acls=acls)
+
+ @staticmethod
+ def unexpose_vdisk(vdisk_guid):
+ """
+ Un-expose a vDisk from all iSCSI Nodes its exposed on
+ :param vdisk_guid: Guid of the vDisk to un-expose
+ :type vdisk_guid: str
+ :return: None
+ :rtype: NoneType
+ """
+ IscsiNodeController.unexpose_vdisk(vdisk_guid=vdisk_guid)
+
+ @staticmethod
+ def restart_targets_for_vdisk(vdisk_guid):
+ """
+ Restarts all targets for the vDisks
+ Deletes the current targets and re-creates them so the connections can be re-established
+ :param vdisk_guid: Guid of the vDisk to restart targets for
+ :type vdisk_guid: str
+ :return: None
+ :rtype: NoneType
+ """
+ IscsiNodeController.restart_targets_for_vdisk(vdisk_guid=vdisk_guid)
diff --git a/helpers/setupjsongenerator.py b/helpers/setupjsongenerator.py
new file mode 100644
index 0000000..8169c89
--- /dev/null
+++ b/helpers/setupjsongenerator.py
@@ -0,0 +1,691 @@
+# Copyright (C) 2016 iNuron NV
+#
+# This file is part of Open vStorage Open Source Edition (OSE),
+# as available from
+#
+# http://www.openvstorage.org and
+# http://www.openvstorage.com.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
+# as published by the Free Software Foundation, in version 3 as it comes
+# in the LICENSE.txt file of the Open vStorage OSE distribution.
+#
+# Open vStorage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY of any kind.
+import os
+import json
+from ci.autotests import AutoTests
+from ovs.dal.hybrids.albabackend import AlbaBackend
+from ovs.dal.hybrids.diskpartition import DiskPartition
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
+from ovs.extensions.storageserver.storagedriver import StorageDriverClient
+from ovs.lib.helpers.toolbox import Toolbox
+
+
+class SetupJsonGenerator(object):
+ """
+ This class provides code to automate the construction of a setup.json file.
+ Addition and removal of several components of the setup.json is provided
+ """
+ HYPERV_KVM = 'KVM'
+ VPOOL_COUNTER = 1
+
+ def __init__(self):
+ self._json_dict = {}
+ self._presets = []
+ self._domains = []
+ self._backends = []
+ self._ips = []
+
+ @property
+ def config(self):
+ """
+ Property containing the currently modelled config dict
+ :return: return the currently modelled config dict
+ :rtype: dict
+ """
+ return self._json_dict
+
+ def dump_json_to_file(self, path):
+ """
+ Write current setup dict to a json file in the provided path.
+ :param path: path to dump json file to
+ :type path: str
+ """
+ with open(path, 'w') as fp:
+ json.dump(self.config, indent=4, sort_keys=True, fp=fp)
+
+ def update_scenarios(self, scenarios=None):
+ """
+ Add scenarios to be scheduled in the setup.
+ :param scenarios: scenarios to add to the 'scenarios' section of the setup.json.
+ If left blank, by default all scenarios will be scheduled.
+ :type scenarios: list
+ """
+ if not isinstance(scenarios, list) and scenarios is not None:
+ raise ValueError('Scenarios should be passed in a list format, not {}'.format(type(scenarios)))
+
+ if scenarios is None:
+ self.config['scenarios'] = ['ALL']
+ else:
+ for scenario in scenarios:
+ if isinstance(scenario, str) and scenario not in AutoTests.list_tests():
+ raise ValueError('Scenario {0} is not a valid scenario path.'.format(scenario))
+
+ self.config['scenarios'] = scenarios
+
+ def update_ci(self, ci_params):
+ """
+ Set the ci constants of the setup file according to the passed parameters.
+ :param ci_params: parameters for the 'ci' section of the setup file.
+ Required keys: 'setup' and 'grid_ip'; the remaining keys are optional
+ and fall back to sensible defaults when omitted.
+ :type ci_params: dict
+ """
+ params_layout = {'setup': (bool, None, True),
+ 'grid_ip': (str, Toolbox.regex_ip, True),
+ 'validation': (bool, None, False),
+ 'cleanup': (bool, None, False),
+ 'send_to_testrail': (bool, None, False),
+ 'fail_on_failed_scenario': (bool, None, False),
+ 'scenarios': (bool, None, False),
+ 'scenario_retries': (int, {'min': 1}, False),
+ 'version': (str, ['andes', 'unstable', 'fargo', 'develop'], False),
+ 'config_manager': (str, 'arakoon', False)}
+
+ all_params = {'validation': False,
+ 'cleanup': False,
+ 'send_to_testrail': True,
+ 'fail_on_failed_scenario': True,
+ 'scenarios': True,
+ 'scenario_retries': 1,
+ 'version': 'andes',
+ 'config_manager': 'arakoon'}
+
+ all_params.update(ci_params)
+
+ ExtensionsToolbox.verify_required_params(required_params=params_layout, actual_params=all_params, verify_keys=True)
+
+ if os.system('ping -c 1 {}'.format(all_params['grid_ip'])) != 0:
+ raise ValueError('No response from ip {0}'.format(all_params['grid_ip']))
+
+ ci = {'setup': all_params['setup'],
+ 'cleanup': all_params['cleanup'],
+ 'send_to_testrail': all_params['send_to_testrail'],
+ 'fail_on_failed_scenario': all_params['fail_on_failed_scenario'],
+ 'version': all_params['version'],
+ 'scenarios': all_params['scenarios'],
+ 'local_hypervisor': {'type': SetupJsonGenerator.HYPERV_KVM,
+ 'user': 'root',
+ 'password': 'rooter'},
+ 'config_manager': all_params['config_manager'],
+ 'user': {'shell': {'username': 'root',
+ 'password': 'rooter'},
+ 'api': {'username': 'admin',
+ 'password': 'admin'}},
+ 'grid_ip': all_params['grid_ip']}
+ self._json_dict['ci'] = ci
+
+ def add_hypervisor(self, hypervisor_ip, hypervisor_type=HYPERV_KVM, username='root', password='rooter', virtual_machines=None):
+ """
+ Add hypervisor information to the model
+ :param hypervisor_type: type of the hypervisor (defaults to KVM)
+ :param hypervisor_ip: ip of the hypervisor itself
+ :type hypervisor_ip: str
+ :param virtual_machines: dict containing the virtual machine ip with their name and according role
+ :type virtual_machines: dict
+ example: {'1.1.1.1': {'name': 'name1', 'role': 'VOLDRV'}}
+ :param username: username to be used in the hypervisor setup
+ :type username: str
+ :param password: password to be used in the hypervisor setup
+ :type password: str
+
+ """
+ if 'ci' not in self._json_dict:
+ raise ValueError('CI constants have to be set before adding hypervisors')
+ self._validate_ip(hypervisor_ip)
+
+ if virtual_machines is None:
+ vm_ip = self.config['ci']['grid_ip']
+ name = 'ubuntu_node_'+str(vm_ip.split('.', 2)[-1]).strip()
+ virtual_machines = {vm_ip: {'name': name, 'role': 'COMPUTE'}}
+
+ if not isinstance(virtual_machines, dict):
+ raise ValueError('Dict of virtual machines should contain entries like { ip: { `name`: `role`}}')
+ for key, value in virtual_machines.iteritems():
+ self._validate_ip(key)
+
+ hypervisor_dict = {'type': hypervisor_type,
+ 'user': username,
+ 'password': password,
+ 'ip': hypervisor_ip,
+ 'vms': virtual_machines}
+
+ self._ips.extend(virtual_machines.keys())
+ if 'hypervisor' not in self.config['ci']:
+ self.config['ci']['hypervisor'] = {}
+ self.config['ci']['hypervisor'] = hypervisor_dict
+
+ def remove_hypervisor(self, hypervisor_ip):
+ """
+ remove the hypervisor with the given ip, if present
+ :param hypervisor_ip: ip address of the hypervisor to remove
+ :type hypervisor_ip: str
+ """
+ if self.config['ci']['hypervisor']['ip'] == hypervisor_ip:
+ self.config['ci'].pop('hypervisor')
+
+ def add_domain(self, domain):
+ """
+ Add available domains to the model.
+ :param domain: domainname to add
+ :type domain: str
+ """
+ if not isinstance(domain, str):
+ raise ValueError('domain is no string')
+ self._domains.append(domain)
+ if 'setup' not in self.config.keys():
+ self.config['setup'] = {}
+ if 'domains' not in self.config['setup'].keys():
+ self.config['setup']['domains'] = []
+ self.config['setup']['domains'].append(domain)
+
+ def remove_domain(self, domain):
+ """
+ Remove a domain from the model
+ :param domain: domain to be removed
+ :type domain: str
+ """
+ try:
+ self.config['setup']['domains'].remove(domain)
+ if self.config['setup']['domains'] == []:
+ self.config['setup'].pop('domains')
+ except KeyError:
+ pass
+
+ def add_storagerouter(self, storagerouter_ip, hostname):
+ """
+ Add a storagerouter to the model given the provided ip and hostname.
+ :param storagerouter_ip: ip address of the storage router
+ :type storagerouter_ip: str
+ :param hostname: hostname of the storagerouter
+ :type hostname: str
+ """
+ self._validate_ip(storagerouter_ip)
+ required_params = {'hostname': (str, None, True)}
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'hostname': hostname}, verify_keys=True)
+ if 'setup' not in self.config.keys():
+ self.config['setup'] = {}
+ if 'storagerouters' in self.config['setup'].keys():
+ if storagerouter_ip in self.config['setup']['storagerouters']:
+ raise ValueError('Storagerouter with given ip {0} already defined.'.format(storagerouter_ip))
+ else:
+ if 'storagerouters' not in self.config['setup']:
+ self.config['setup']['storagerouters'] = {}
+ self.config['setup']['storagerouters'][storagerouter_ip] = {'hostname': hostname}
+
+ def remove_storagerouter(self, storagerouter_ip):
+ """
+ If a storagerouter with the given ip is present in the model, remove it.
+ :param storagerouter_ip: ip to remove
+ :type storagerouter_ip: str
+ """
+ try:
+ self.config['setup']['storagerouters'].pop(storagerouter_ip)
+ except Exception:
+ pass
+
+ def add_disk_to_sr(self, storagerouter_ip, name, roles):
+ """
+ Add disk with given name and roles to a storagerouter in the model.
+ :param storagerouter_ip:
+ :type storagerouter_ip: str
+ :param name: name of the disk
+ :type name: str
+ :param roles: roles to assign to the disk
+ :type roles: list
+ """
+ self._valid_storagerouter(storagerouter_ip)
+ required_params = {'name': (str, None, True), 'roles': (list, None, True)}
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'name': name, 'roles': roles}, verify_keys=True)
+ for role in roles:
+ if role not in DiskPartition.ROLES:
+ raise ValueError('Provided role {0} is not an allowed role for disk {1}.'.format(role, name))
+ disk_dict = {name: {'roles': roles}}
+ if 'disks' not in self.config['setup']['storagerouters'][storagerouter_ip]:
+ self.config['setup']['storagerouters'][storagerouter_ip]['disks'] = {}
+ self.config['setup']['storagerouters'][storagerouter_ip]['disks'].update(disk_dict)
+
+ def remove_disk_from_sr(self, storagerouter_ip, name):
+ """
+ Remove given disk from the specified storagerouter
+ :param storagerouter_ip: storagerouter to remove disk from
+ :type storagerouter_ip: str
+ :param name: name of the disk to be removed
+ :type name: str
+ """
+ try:
+ self.config['setup']['storagerouters'][storagerouter_ip]['disks'].pop(name)
+ except Exception:
+ pass
+
+ def add_domain_to_sr(self, storagerouter_ip, name, recovery=False):
+ """
+ Add domains, present in the model, to a storage router.
+ :param storagerouter_ip: ip of the storage router
+ :type storagerouter_ip: str
+ :param name: name of the domain to add to the storagerouter
+ :type name: str
+ :param recovery: true or false whether the domain is a recovery domain or not
+ :type recovery: bool
+ """
+ self._valid_storagerouter(storagerouter_ip)
+ ExtensionsToolbox.verify_required_params(required_params={'name': (str, None, True)}, actual_params={'name': name}, verify_keys=True)
+
+ if name not in self._domains:
+ raise ValueError('Invalid domain passed: {0}'.format(name))
+
+ path = self.config['setup']['storagerouters'][storagerouter_ip]
+ if 'domains' not in path.keys():
+ path['domains'] = {}
+ path = path['domains']
+ config_key = 'domain_guids' if recovery is False else 'recovery_domain_guids'
+ if config_key not in path:
+ path[config_key] = []
+ path[config_key].append(name)
+
+ def remove_domain_from_sr(self, storagerouter_ip, name):
+ """
+ Remove the given domain from the storagerouter
+ :param storagerouter_ip: storagerouter to remove the domains from
+ :type storagerouter_ip: str
+ :param name: name of the domain to remove
+ :type name: str
+ """
+ try:
+ self.config['setup']['storagerouters'][storagerouter_ip]['domains']['domain_guids'].remove(name)
+ except Exception:
+ pass
+
+ def add_backend(self, backend_name, domains=None, scaling='LOCAL'):
+ """
+ Add a backend with provided domains and scaling to the model.
+ :param backend_name: name of the backend
+ :type backend_name: str
+ :param domains: domains the backend is linked to
+ :type domains: list
+ :param scaling:
+ :type scaling: str
+ """
+ if domains is None:
+ domains = []
+ else:
+ for domain_name in domains:
+ if domain_name not in self._domains:
+ raise ValueError('Invalid domain passed: {0}'.format(domain_name))
+
+ ExtensionsToolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True),
+ 'domains': (list, self._domains, True),
+ 'scaling': (str, AlbaBackend.SCALINGS, True)},
+ actual_params={'backend_name': backend_name,
+ 'domains': domains,
+ 'scaling': scaling}, verify_keys=True)
+ be_dict = {'name': backend_name,
+ 'domains': {'domain_guids': domains},
+ 'scaling': scaling}
+ if 'setup' not in self.config.keys():
+ self.config['setup'] = {}
+ self._backends.append(be_dict['name'])
+ if 'backends' not in self.config['setup']:
+ self.config['setup']['backends'] = []
+ self.config['setup']['backends'].append(be_dict)
+
+ def remove_backend(self, backend_name):
+ """
+ Remove backend with given name from model
+ :param backend_name: name of the backend to remove
+ :type backend_name: str
+ """
+ for index, backend in enumerate(self.config['setup']['backends']):
+ if backend['name'] == backend_name:
+ self.config['setup']['backends'].pop(index)
+
+ def add_preset_to_backend(self, backend_name, preset_name, policies, compression='snappy', encryption='none', fragment_size=2097152):
+ """
+ Add a preset with provided parameters to given backend.
+ :param backend_name: name of the backend to which the preset should be added
+ :type backend_name: str
+ :param preset_name: name of the preset that should be added
+ :type preset_name: str
+ :param policies: nested list of policies
+ :type policies: list
+ :param compression: compression level
+ :type compression: str
+ :param encryption: encryption level
+ :type encryption: str
+ :param fragment_size:
+ :type fragment_size: int
+ """
+ if backend_name not in self._backends:
+ raise ValueError('Invalid backend passed as argument: {0}'.format(backend_name))
+
+ self._check_policies(policies)
+
+ compression_options = ['snappy', 'bz2', 'none']
+ if compression not in compression_options:
+ raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('', ''.join(compression_options)))
+
+ encryption_options = ['aes-cbc-256', 'aes-ctr-256', 'none']
+ if encryption not in encryption_options:
+ raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('', ''.join(encryption_options)))
+
+ if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3):
+ raise ValueError('Fragment size should be a positive integer smaller than 1 GiB')
+
+ ExtensionsToolbox.verify_required_params(required_params={'backend_name': (str, Toolbox.regex_backend, True),
+ 'preset_name': (str, Toolbox.regex_preset, True),
+ 'policies': (list, None, True),
+ 'fragment_size': (int, None, False)},
+ actual_params={'backend_name': backend_name,
+ 'preset_name': preset_name,
+ 'policies': policies,
+ 'fragment_size': fragment_size},
+ verify_keys=True)
+
+ if encryption is None:
+ encryption = 'none'
+ preset_dict = {
+ 'name': preset_name,
+ 'compression': compression,
+ 'encryption': encryption,
+ 'policies': policies,
+ 'fragment_size': fragment_size,
+ }
+ self._presets.append(preset_dict['name'])
+ for index, backend in enumerate(self.config['setup']['backends']):
+ if backend['name'] == backend_name:
+ if 'presets' not in backend:
+ self.config['setup']['backends'][index]['presets'] = []
+ self.config['setup']['backends'][index]['presets'].append(preset_dict)
+
+ def remove_preset_from_backend(self, backend_name, preset_name):
+ """
+ Remove the preset from given backend
+ :param backend_name: name of the backend in which to search
+ :type backend_name: str
+ :param preset_name: preset name to remove
+ :type preset_name: str
+ """
+ try:
+ for index, backend in enumerate(self.config['setup']['backends']):
+ if backend['name'] == backend_name:
+ if 'presets' in backend:
+ for jndex, preset in enumerate(self.config['backends'][index]['presets']):
+ if preset['name'] == preset_name:
+ self.config['setup']['backends'][index]['presets'].pop(jndex)
+ except Exception:
+ pass
+
+ def add_osd_to_backend(self, backend_name, osds_on_disks=None, linked_backend=None, linked_preset=None):
+ """
+ Add an osd to given backend.
+ :param backend_name:
+ :type backend_name: str
+ :param osds_on_disks:
+ :type osds_on_disks: dict
+ example: {'1.1.1.1': {'disk1': 2, 'disk2': 2}}
+ :param linked_backend:
+ :type linked_backend: str
+ :param linked_preset:
+ :type linked_preset: str
+ """
+ if osds_on_disks is None:
+ osds_on_disks = {}
+ if backend_name not in self._backends:
+ raise ValueError('Invalid backend passed as argument: {0}'.format(backend_name))
+ required_params = {'backend_name': (str, None, True),
+ 'osds_on_disk': (dict, None, False),
+ 'linked_backend': (str, Toolbox.regex_backend, False),
+ 'linked_preset': (str, Toolbox.regex_preset, False)}
+ actual_params = {'backend_name': backend_name,
+ 'osds_on_disk': osds_on_disks,
+ 'linked_backend': linked_backend,
+ 'linked_preset': linked_preset}
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True)
+
+ osd_dict = {}
+ for index, backend in enumerate(self.config['setup']['backends']):
+ if backend['name'] == backend_name:
+ scaling = backend['scaling']
+ if scaling == 'LOCAL':
+ if osds_on_disks is None:
+ raise ValueError('Osd dictionary required')
+ osd_dict = osds_on_disks
+ elif scaling == 'GLOBAL':
+ if linked_backend not in self._backends:
+ raise ValueError('Provided backend {0} not in known backends'.format(linked_backend))
+ if linked_preset not in self._presets:
+ raise ValueError('Provided preset {0} not in known presets'.format(linked_preset))
+ osd_dict = {linked_backend: linked_preset}
+
+ else:
+ raise ValueError('invalid scaling ({0}) passed'.format(scaling))
+ if 'osds' not in backend:
+ self.config['setup']['backends'][index]['osds'] = {}
+ self.config['setup']['backends'][index]['osds'].update(osd_dict)
+
+ def remove_osd_from_backend(self, osd_identifier, backend_name):
+ """
+ Remove the osd from given backend
+ :param backend_name: name of the backend in which to search
+ :type backend_name: str
+ :param osd_identifier: osd name to remove
+ :type osd_identifier: str
+ """
+ try:
+ for index, backend in enumerate(self.config['setup']['backends']):
+ if backend['name'] == backend_name:
+ self.config['setup']['backends'][index]['osds'].pop(osd_identifier)
+ except Exception:
+ pass
+
+ def add_vpool(self, storagerouter_ip, backend_name, preset_name, storage_ip, vpool_name=None):
+ """
+ Add a vpool to given storagerouter
+ :param storagerouter_ip: ip of the storagerouter to add the vpool to
+ :type storagerouter_ip: str
+ :param vpool_name: name of the vpool to add
+ :type vpool_name: str
+ :param backend_name: name of the backend to link to the vpool
+ :type backend_name: str
+ :param preset_name: name of the preset to link to the vpool
+ :type preset_name: str
+ :param storage_ip:
+ :type storage_ip: str
+ """
+
+ if vpool_name is None:
+ vpool_name = 'myvpool{0}'.format(self.VPOOL_COUNTER)
+ SetupJsonGenerator.VPOOL_COUNTER += 1
+
+ required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True),
+ 'vpool_name': (str, None, False),
+ 'backend_name': (str, None, True),
+ 'preset_name': (str, None, True),
+ 'storage_ip': (str, Toolbox.regex_ip, True)}
+
+ actual_params = {'storagerouter_ip': storagerouter_ip,
+ 'vpool_name': vpool_name,
+ 'backend_name': backend_name,
+ 'preset_name': preset_name,
+ 'storage_ip': storage_ip}
+
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True)
+ self._valid_storagerouter(storagerouter_ip=storagerouter_ip)
+ self._validate_ip(ip=storage_ip)
+ if backend_name not in self._backends:
+ raise ValueError('Provided backend {0} not in known backends'.format(backend_name))
+ if preset_name not in self._presets:
+ raise ValueError('Provided preset not in known presets'.format(preset_name))
+ vpool_dict = {'backend_name': backend_name,
+ 'preset': preset_name,
+ 'storage_ip': storage_ip,
+ 'proxies': 1,
+ 'fragment_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False},
+ 'location': 'disk'},
+ 'block_cache': {'strategy': {'cache_on_read': False, 'cache_on_write': False},
+ 'location': 'disk'}
+ }
+ if 'vpools' not in self.config['setup']['storagerouters'][storagerouter_ip]:
+ self.config['setup']['storagerouters'][storagerouter_ip]['vpools'] = {}
+ self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool_name] = vpool_dict
+
+ def remove_vpool(self, storagerouter_ip, vpool_name):
+ """
+ Try to remove a vpool on storagerouter with given ip
+ :param storagerouter_ip: search for vpool on given storagerouter
+ :type storagerouter_ip: str
+ :param vpool_name: remove vpool with this name
+ :type vpool_name: str
+ """
+ try:
+ self.config['setup']['storagerouters'][storagerouter_ip]['vpools'].pop(vpool_name)
+ except Exception:
+ pass
+
+ def change_cache(self, storagerouter_ip, vpool, block_cache=True, fragment_cache=True, on_read=True, on_write=True):
+ """
+ Change the caching parameters of a given vpool on a given storagerouter. By default, change parameters of both block cache and fragment cache.
+ :param storagerouter_ip: search for vpool on this storagerouter
+ :type storagerouter_ip: str
+ :param vpool: change cache options of given vpool
+ :type vpool: str
+ :param block_cache: change block cache parameters, default True
+ :type block_cache: bool
+ :param fragment_cache: change fragment cache parameters, default True
+ :type fragment_cache: bool
+ :param on_read: change on_read parameters, default True
+ :type on_read: bool
+ :param on_write: change on_write parameters, default True
+ :type on_write: bool
+ """
+ self._valid_storagerouter(storagerouter_ip=storagerouter_ip)
+
+ required_params = {'vpool': (str, None, True),
+ 'block_cache': (bool, None, False),
+ 'fragment_cache': (bool, None, False),
+ 'on_read': (bool, None, False),
+ 'on_write': (bool, None, False)}
+ actual_params = {'vpool': vpool,
+ 'block_cache': block_cache,
+ 'fragment_cache': fragment_cache,
+ 'on_read': on_read,
+ 'on_write': on_write}
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params=actual_params, verify_keys=True)
+ try:
+ vpool = self.config['setup']['storagerouters'][storagerouter_ip]['vpools'][vpool]
+ except KeyError:
+ raise ValueError('Vpool {0} not found'.format(vpool))
+ if block_cache is True:
+ vpool['block_cache']['strategy']['cache_on_read'] = on_read
+ vpool['block_cache']['strategy']['cache_on_write'] = on_write
+ if fragment_cache is True:
+ vpool['fragment_cache']['strategy']['cache_on_read'] = on_read
+ vpool['fragment_cache']['strategy']['cache_on_write'] = on_write
+
+ def update_storagedriver_of_vpool(self, sr_ip, vpool_name, sr_params=None):
+ '''
+ Update all or some data of a storagedriver, assigned to a vpool on a specific storagerouter.
+ :param sr_ip: ip of the storagerouter on which the vpool is located
+ :type sr_ip: str
+ :param vpool_name: name of the vpool of which to update the storagedriver data
+ :type vpool_name: str
+ :param sr_params: parameters to update of the referenced storagedriver
+ :type sr_params: dict
+ '''
+ required_params = {'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()),
+ 'cluster_size': (int, StorageDriverClient.CLUSTER_SIZES),
+ 'volume_write_buffer': (int, {'min': 128, 'max': 10240}, False),
+ 'global_read_buffer': (int, {'min': 128, 'max': 10240}, False),
+ 'strategy': (str, None, False),
+ 'deduplication': (str, None, False),
+ 'dtl_transport': (str, StorageDriverClient.VPOOL_DTL_TRANSPORT_MAP.keys()),
+ 'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys())}
+
+ default_params = {'sco_size': 4,
+ 'cluster_size': 4,
+ 'volume_write_buffer': 512,
+ 'strategy': 'none',
+ 'global_write_buffer': 128,
+ 'global_read_buffer': 128,
+ 'deduplication': 'non_dedupe',
+ 'dtl_transport': 'tcp',
+ 'dtl_mode': 'sync'}
+
+ if sr_params is None:
+ sr_params = {}
+ default_params.update(sr_params)
+ if not isinstance(default_params, dict):
+ raise ValueError('Parameters should be of type "dict"')
+ ExtensionsToolbox.verify_required_params(required_params, default_params)
+ if sr_ip not in self.config['setup']['storagerouters'].keys():
+ raise KeyError('Storagerouter with ip is not defined')
+ if vpool_name not in self.config['setup']['storagerouters'][sr_ip]['vpools']:
+ raise KeyError('Vpool with name {0} is not defined on storagerouter with ip {1}'.format(vpool_name, sr_ip))
+ self.config['setup']['storagerouters'][sr_ip]['vpools'][vpool_name]['storagedriver'] = default_params
+
+ def remove_storagedriver_from_vpool(self, sr_ip, vpool_name):
+ '''
+ Remove the storagedriver details on given vpool of given storagerouter.
+ :param sr_ip: ip of the storagerouter on which the vpool is located
+ :type sr_ip: str
+ :param vpool_name: name of the vpool of which to update the storagedriver data
+ :type vpool_name: str
+ '''
+ try:
+ self.config['setup']['storagerouters'][sr_ip]['vpools'][vpool_name].pop('storagedriver')
+ except Exception:
+ pass
+
+ def _valid_storagerouter(self, storagerouter_ip):
+ self._validate_ip(storagerouter_ip)
+ if storagerouter_ip not in self.config['setup']['storagerouters']:
+ raise ValueError('Storagerouter with ip {0} not found in json'.format(storagerouter_ip))
+
+ def _validate_ip(self, ip):
+ required_params = {'storagerouter_ip': (str, Toolbox.regex_ip, True)}
+ try:
+ ExtensionsToolbox.verify_required_params(required_params=required_params, actual_params={'storagerouter_ip': ip}, verify_keys=True)
+ except RuntimeError as e:
+ raise ValueError(e)
+ if os.system('ping -c 1 {0}'.format(ip)) != 0:
+ raise ValueError('No response from ip {0}'.format(ip))
+
+ def _check_policies(self, policies):
+ class _Policy(object):
+ def __init__(self, policy):
+ if not isinstance(policy, list) or len(policy) != 4:
+ raise ValueError('Policy {0} must be of type list with length = 4'.format(policy))
+ self.k, self.c, self.m, self.x = policy
+ if all(isinstance(entry, int) for entry in policy) is False:
+ raise ValueError('All policy entries should be integers')
+
+ def get_policy_as_dict(self):
+ return {'k': self.k, 'c': self.c, 'm': self.m, 'x': self.x}
+
+ def get_policy_as_list(self):
+ return [self.k, self.c, self.x, self.m]
+
+ def check_policy(self):
+ if self.k > self.c:
+ raise ValueError('Invalid policy: k({0}) < c({1}) is required'.format(self.k, self.c))
+ if self.c > self.k + self.m:
+ raise ValueError('Invalid policy: c({0}) < k + m ({1} + {2}) is required'.format(self.c, self.k, self.m))
+ clone = self.get_policy_as_dict()
+ clone.pop('m')
+ if 0 in clone.values():
+ raise ValueError('Policy: {0}: {1} cannot be equal to zero'.format(self.get_policy_as_list(), ''.join([i[0] for i in clone.items() if i[1] == 0])))
+
+ for p in policies:
+ _Policy(p).check_policy()
diff --git a/helpers/storagerouter.py b/helpers/storagerouter.py
index 65b43f4..18df9ec 100644
--- a/helpers/storagerouter.py
+++ b/helpers/storagerouter.py
@@ -13,13 +13,13 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
-from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.dal.hybrids.storagerouter import StorageRouter
+from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.extensions.generic.logger import Logger
-from ovs.extensions.generic.system import System
+from ..helpers.ci_constants import CIConstants
-class StoragerouterHelper(object):
+class StoragerouterHelper(CIConstants):
"""
StoragerouterHelper class
@@ -42,77 +42,51 @@ def get_storagerouter_by_guid(storagerouter_guid):
"""
return StorageRouter(storagerouter_guid)
- @staticmethod
- def get_storagerouter_guid_by_ip(storagerouter_ip):
- """
-
- :param storagerouter_ip: ip of a storagerouter
- :type storagerouter_ip: str
- :return: storagerouter guid
- :rtype: str
- """
- return StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
-
@staticmethod
def get_storagerouter_by_ip(storagerouter_ip):
"""
-
:param storagerouter_ip: ip of a storagerouter
:type storagerouter_ip: str
- :return: storagerouter object
+ :return: storagerouter
:rtype: ovs.dal.hybrids.storagerouter.StorageRouter
"""
return StorageRouterList.get_by_ip(storagerouter_ip)
@staticmethod
- def get_disks_by_ip(storagerouter_ip):
+ def get_storagerouter_ip(storagerouter_guid):
"""
-
- :param storagerouter_ip:
- :type storagerouter_ip: str
- :return: disks found for the storagerouter ip
- :rtype: list of
+ :param storagerouter_guid: guid of a storagerouter
+ :type storagerouter_guid: str
+ :return: storagerouter ip
+ :rtype: str
"""
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)
- return StorageRouter(storagerouter_guid).disks
+ return StorageRouter(storagerouter_guid).ip
@staticmethod
- def get_disk_by_ip(ip, diskname):
+ def get_disk_by_name(guid, diskname):
"""
- Fetch a disk by its ip and name
+ Fetch a disk by its guid and name
- :param ip: ip address of a storagerouter
+ :param guid: guid of a storagerouter
+ :type guid: str
:param diskname: shortname of a disk (e.g. sdb)
:return: Disk Object
:rtype: ovs.dal.hybrids.disk.disk
"""
-
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip)
- disks = StorageRouter(storagerouter_guid).disks
+ disks = StoragerouterHelper.get_storagerouter_by_guid(guid).disks
for d in disks:
if d.name == diskname:
return d
- @staticmethod
- def get_local_storagerouter():
- """
- Fetch the local storagerouter settings
-
- :return: StorageRouter Object
- :rtype: ovs.dal.hybrids.storagerouter.StorageRouter
- """
-
- return System.get_my_storagerouter()
-
@staticmethod
def get_storagerouter_ips():
- """
- Fetch all the ip addresses in this cluster
+ """
+ Fetch all the ip addresses in this cluster
- :return: list with storagerouter ips
- :rtype: list
- """
- return [storagerouter.ip for storagerouter in StorageRouterList.get_storagerouters()]
+ :return: list with storagerouter ips
+ :rtype: list
+ """
+ return [storagerouter.ip for storagerouter in StorageRouterList.get_storagerouters()]
@staticmethod
def get_storagerouters():
@@ -122,72 +96,54 @@ def get_storagerouters():
:return: list with storagerouters
:rtype: list
"""
-
return StorageRouterList.get_storagerouters()
- @staticmethod
- def get_master_storagerouters():
- """
- Fetch the master storagerouters
-
- :return: list with master storagerouters
- :rtype: list
- """
-
- return StorageRouterList.get_masters()
-
- @staticmethod
- def get_master_storagerouter_ips():
- """
- Fetch the master storagerouters ips
-
- :return: list with master storagerouters ips
- :rtype: list
- """
-
- return [storagerouter.ip for storagerouter in StorageRouterList.get_masters()]
-
- @staticmethod
- def get_slave_storagerouters():
- """
- Fetch the slave storagerouters
-
- :return: list with slave storagerouters
- :rtype: list
- """
-
- return StorageRouterList.get_slaves()
-
- @staticmethod
- def get_slave_storagerouter_ips():
- """
- Fetch the slave storagerouters ips
-
- :return: list with slave storagerouters ips
- :rtype: list
- """
-
- return [storagerouter.ip for storagerouter in StorageRouterList.get_slaves()]
-
- @staticmethod
- def sync_disk_with_reality(api, guid=None, ip=None, timeout=None):
+ @classmethod
+ def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None, *args, **kwargs):
"""
-
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param guid: guid of the storagerouter
:type guid: str
:param ip: ip of the storagerouter
:type ip: str
:param timeout: timeout time in seconds
:type timeout: int
+ """
+ if guid is not None:
+ if ip is not None:
+ Logger.warning('Both storagerouter guid and ip passed, using guid for sync.')
+ storagerouter_guid = guid
+ elif ip is not None:
+ storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(ip).guid
+ else:
+ raise ValueError('No guid or ip passed.')
+ task_id = cls.api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid), data=None)
+ return cls.api.wait_for_task(task_id=task_id, timeout=timeout)
+
+ @classmethod
+ def get_storagerouters_by_role(cls):
+ """
+ Gets storagerouters based on roles
:return:
"""
- storagerouter_guid = guid
- if ip is not None:
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip)
- if storagerouter_guid is None:
- raise ValueError('No guid or ip found.')
+ voldr_str_1 = None # Will act as volumedriver node
+ voldr_str_2 = None # Will act as volumedriver node
+ compute_str = None # Will act as compute node
+ if isinstance(cls.HYPERVISOR_INFO, dict): # Hypervisor section is filled in -> VM environment
+ nodes_info = {}
+ for hv_ip, hv_info in cls.HYPERVISOR_INFO['vms'].iteritems():
+ nodes_info[hv_ip] = hv_info
+ elif cls.SETUP_CFG['ci'].get('nodes') is not None: # Physical node section -> Physical environment
+ nodes_info = cls.SETUP_CFG['ci']['nodes']
else:
- task_id = api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid), data=None)
- return api.wait_for_task(task_id=task_id, timeout=timeout)
+ raise RuntimeError('Unable to fetch node information. Either hypervisor section or node section is missing!')
+ for node_ip, node_details in nodes_info.iteritems():
+ if node_details['role'] == "VOLDRV":
+ if voldr_str_1 is None:
+ voldr_str_1 = StoragerouterHelper.get_storagerouter_by_ip(node_ip)
+ elif voldr_str_2 is None:
+ voldr_str_2 = StoragerouterHelper.get_storagerouter_by_ip(node_ip)
+ elif node_details['role'] == "COMPUTE" and compute_str is None:
+ compute_str = StoragerouterHelper.get_storagerouter_by_ip(node_ip)
+ assert voldr_str_1 is not None and voldr_str_2 is not None and compute_str is not None,\
+ 'Could not fetch 2 storagedriver nodes and 1 compute node based on the setup.json config.'
+ return voldr_str_1, voldr_str_2, compute_str
\ No newline at end of file
diff --git a/helpers/tests/__init__.py b/helpers/tests/__init__.py
new file mode 100644
index 0000000..8ae97b6
--- /dev/null
+++ b/helpers/tests/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (C) 2016 iNuron NV
+#
+# This file is part of Open vStorage Open Source Edition (OSE),
+# as available from
+#
+# http://www.openvstorage.org and
+# http://www.openvstorage.com.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
+# as published by the Free Software Foundation, in version 3 as it comes
+# in the LICENSE.txt file of the Open vStorage OSE distribution.
+#
+# Open vStorage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY of any kind.
+
+"""
+Init
+"""
diff --git a/helpers/tests/jsongeneratortestcase.py b/helpers/tests/jsongeneratortestcase.py
new file mode 100644
index 0000000..810ef38
--- /dev/null
+++ b/helpers/tests/jsongeneratortestcase.py
@@ -0,0 +1,389 @@
+# Copyright (C) 2016 iNuron NV
+#
+# This file is part of Open vStorage Open Source Edition (OSE),
+# as available from
+#
+# http://www.openvstorage.org and
+# http://www.openvstorage.com.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
+# as published by the Free Software Foundation, in version 3 as it comes
+# in the LICENSE.txt file of the Open vStorage OSE distribution.
+#
+# Open vStorage is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY of any kind.
+import unittest
+from ci.api_lib.helpers.setupjsongenerator import SetupJsonGenerator
+
+
+class JsonGeneratorTestcase(unittest.TestCase):
+ def __init__(self, *args, **kwargs):
+ super(JsonGeneratorTestcase, self).__init__(*args, **kwargs)
+ self.generator = SetupJsonGenerator()
+ self.ip_1 = '127.0.0.1'
+ self.ip_2 = '127.0.0.2'
+ self.ip_3 = '127.0.0.3'
+ self.ip_4 = '127.0.0.4'
+
+ def test_structure(self):
+ self.assertEquals(len(self.generator.config.keys()), 0)
+ self.assertTrue(isinstance(self.generator.config, dict))
+
+ def test_model_ci(self):
+ self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1})
+ self.assertTrue(isinstance(self.generator.config['ci']['setup'], bool))
+ self.assertEquals(len(self.generator.config['ci']), 10)
+
+ def test_add_hypervisor(self):
+ self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1})
+ with self.assertRaises(ValueError):
+ self.generator.add_hypervisor(hypervisor_ip='5')
+ self.generator.add_hypervisor(hypervisor_ip=self.ip_1)
+ self.assertEquals(len(self.generator.config['ci']), 11)
+ self.assertTrue('vms' in self.generator.config['ci']['hypervisor'].keys())
+ self.assertTrue('ubuntu_node_0.1' in self.generator.config['ci']['hypervisor']['vms'][self.ip_1]['name'])
+
+
+ def test_remove_hypervisor(self):
+ self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1})
+ self.generator.add_hypervisor(hypervisor_ip=self.ip_1)
+ self.generator.remove_hypervisor(hypervisor_ip=self.ip_2)
+ self.generator.add_hypervisor(hypervisor_ip=self.ip_1)
+ self.assertEquals(len(self.generator.config['ci']['hypervisor']), 5)
+
+ def test_model_scenarios(self):
+ self.generator.update_scenarios()
+ self.assertEquals(self.generator.config['scenarios'], ['ALL'])
+ with self.assertRaises(ValueError):
+ self.generator.update_scenarios(['ABC', 'def'])
+
+ def test_add_domain(self):
+ self.generator.add_domain('domain1')
+ self.generator.add_domain('domain2')
+ self.assertEquals(len(self.generator.config['setup']['domains']), 2)
+ with self.assertRaises(ValueError):
+ self.generator.add_domain(7)
+
+ def test_remove_domain(self):
+ self.generator.add_domain('domain1')
+ self.generator.add_domain('domain2')
+ self.generator.remove_domain('domain1')
+ self.assertEquals(len(self.generator.config['setup']['domains']), 1)
+
+ def test_storagerouter_addition_removal(self):
+ self.generator.add_domain('domain1')
+ self.generator.add_domain('domain2')
+ with self.assertRaises(ValueError):
+ self.generator.add_storagerouter(storagerouter_ip='100', hostname='hostname')
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname')
+ self.assertTrue(self.ip_1 in self.generator.config['setup']['storagerouters'].keys())
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_2, hostname='hostname')
+ self.generator.remove_storagerouter(storagerouter_ip=self.ip_2)
+ self.assertFalse(self.ip_2 in self.generator.config['setup']['storagerouters'].keys())
+
+ with self.assertRaises(ValueError):
+ self.generator.add_disk_to_sr(storagerouter_ip='5', name='disk1', roles=['SCRUB', 'DTL'])
+ with self.assertRaises(ValueError):
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['bla'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk1', roles=['SCRUB', 'DTL'])
+ self.assertTrue('disk1' in self.generator.config['setup']['storagerouters'][self.ip_1]['disks'])
+ self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['disks']['disk1']['roles']), 2)
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_1, name='disk2', roles=['DB'])
+ self.generator.remove_disk_from_sr(storagerouter_ip=self.ip_1, name='disk2')
+ self.assertFalse('disk2' in self.generator.config['setup']['storagerouters'][self.ip_1])
+
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain1', recovery=True)
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2')
+ self.generator.remove_domain_from_sr(storagerouter_ip=self.ip_1, name='domain2')
+ self.assertFalse('domain2' in self.generator.config['setup']['storagerouters'][self.ip_1])
+
+ self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['domain_guids']), 1)
+ self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 1)
+
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_1, name='domain2', recovery=True)
+ self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['domain_guids']), 2)
+ self.assertEquals(len(self.generator.config['setup']['storagerouters'][self.ip_1]['domains']['recovery_domain_guids']), 2)
+
+ def test_backend_addition_removal(self):
+ self.generator.add_domain('domain1')
+ self.generator.add_domain('domain2')
+
+ self.generator.add_backend(backend_name='mybackend', domains=['domain1'])
+ self.assertItemsEqual(self.generator.config['setup']['backends'][0].keys(), ['name', 'domains', 'scaling'])
+ self.generator.add_backend(backend_name='mybackend02', domains=['domain1'], scaling='GLOBAL')
+ self.assertItemsEqual(self.generator.config['setup']['backends'][1].keys(), ['name', 'domains', 'scaling'])
+
+ self.generator.add_preset_to_backend(backend_name='mybackend02', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+ self.assertEqual(self.generator.config['setup']['backends'][1]['name'], 'mybackend02')
+ with self.assertRaises(ValueError):
+ self.generator.add_preset_to_backend(backend_name='non-existing_backend', preset_name='mypreset', policies=[1, 2, 2, 1])
+
+ self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_1: {'vdb': 2}})
+ self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 1)
+
+ self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_2: {'vdb': 2}})
+ self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 2)
+
+ self.generator.remove_osd_from_backend(backend_name='mybackend', osd_identifier=self.ip_2)
+ self.assertEquals(len(self.generator.config['setup']['backends'][0]['osds']), 1)
+
+ self.assertEqual(self.generator.config['setup']['backends'][0]['osds'][self.ip_1]['vdb'], 2)
+ with self.assertRaises(ValueError):
+ self.generator.add_osd_to_backend(backend_name='mybackend02', osds_on_disks={self.ip_1: {'vdb': 2}})
+ self.generator.add_osd_to_backend(backend_name='mybackend02', linked_backend='mybackend', linked_preset='mypreset')
+ self.assertEqual(self.generator.config['setup']['backends'][1]['osds']['mybackend'], 'mypreset')
+ self.generator.remove_backend('mybackend02')
+ self.assertNotEquals(len(self.generator.config['setup']['backends']), 3)
+
+ def test_vpool_addition_removal(self):
+ vpoolname = 'vpool01'
+ self.generator.add_domain('domain1')
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname')
+ self.generator.add_backend(backend_name='mybackend', domains=['domain1'])
+ self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+ with self.assertRaises(ValueError):
+ self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='non-existing_backend', preset_name='mypreset', storage_ip=self.ip_1)
+ with self.assertRaises(ValueError):
+ self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='non-existing_preset', storage_ip=self.ip_1)
+
+ self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1)
+ self.generator.add_vpool(storagerouter_ip=self.ip_1, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1, vpool_name='vpool1000')
+ self.generator.remove_vpool(storagerouter_ip=self.ip_1, vpool_name='vpool1000')
+ self.assertFalse('vpool1000' in self.generator.config['setup']['storagerouters'][self.ip_1]['vpools'])
+
+ def test_storagedriver_addition_removal(self):
+ vpoolname = 'vpool01'
+ self.generator.add_domain('domain1')
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_1, hostname='hostname')
+ self.generator.add_backend(backend_name='mybackend', domains=['domain1'])
+ self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+ self.generator.add_vpool(storagerouter_ip=self.ip_1, vpool_name=vpoolname, backend_name='mybackend', preset_name='mypreset', storage_ip=self.ip_1)
+ self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_1, vpool_name=vpoolname, sr_params={'sco_size': 8})
+ path = self.generator.config['setup']['storagerouters'][self.ip_1]['vpools'][vpoolname]
+ self.assertEquals(path['storagedriver']['sco_size'], 8)
+ self.assertTrue(isinstance(path['storagedriver']['deduplication'], str))
+ self.generator.remove_storagedriver_from_vpool(sr_ip=self.ip_1, vpool_name=vpoolname)
+ self.assertFalse('storagedriver' in path.keys())
+
+ def test_full_flow(self):
+ self.generator.update_ci(ci_params={'setup': True, 'grid_ip': self.ip_1})
+ self.generator.add_hypervisor(hypervisor_ip=self.ip_1,
+ virtual_machines={self.ip_2: {'name': 'ubuntu16.04-ovsnode01-setup1',
+ 'role': 'COMPUTE'},
+ self.ip_3: {'name': 'ubuntu16.04-ovsnode02-setup1',
+ 'role': 'VOLDRV'},
+ self.ip_4: {'name': 'ubuntu16.04-ovsnode03-setup1',
+ 'role': 'VOLDRV'}})
+
+ self.generator.update_scenarios()
+ self.generator.add_domain('Roubaix')
+ self.generator.add_domain('Gravelines')
+ self.generator.add_domain('Strasbourg')
+
+ # add backends ####
+
+ self.generator.add_backend(backend_name='mybackend', domains=['Roubaix'])
+ self.generator.add_osd_to_backend(backend_name='mybackend', osds_on_disks={self.ip_2: {'sde': 2, 'sdf': 2},
+ self.ip_3: {'sde': 2, 'sdf': 2},
+ self.ip_4: {'sde': 2, 'sdf': 2}})
+ self.generator.add_preset_to_backend(backend_name='mybackend', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+
+ self.generator.add_backend(backend_name='mybackend02', domains=['Gravelines'])
+ self.generator.add_preset_to_backend(backend_name='mybackend02', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+ self.generator.add_osd_to_backend(backend_name='mybackend02', osds_on_disks={self.ip_2: {'sdg': 2},
+ self.ip_3: {'sdg': 2},
+ self.ip_4: {'sdg': 2}})
+
+ self.generator.add_backend(backend_name='mybackend-global', domains=['Roubaix', 'Gravelines', 'Strasbourg'], scaling='GLOBAL')
+ self.generator.add_preset_to_backend(backend_name='mybackend-global', preset_name='mypreset', policies=[[1, 2, 2, 1]])
+ self.generator.add_osd_to_backend(backend_name='mybackend-global', linked_backend='mybackend', linked_preset='mypreset')
+ self.generator.add_osd_to_backend(backend_name='mybackend-global', linked_backend='mybackend02', linked_preset='mypreset')
+
+ # add storagerouter 1
+
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_2, hostname='ovs-node-1-1604')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Roubaix')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Gravelines', recovery=True)
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_2, name='Strasbourg', recovery=True)
+
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sda', roles=['WRITE', 'DTL'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sdb', roles=['DB'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_2, name='sdc', roles=['SCRUB'])
+
+ self.generator.add_vpool(storagerouter_ip=self.ip_2, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1)
+ self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', block_cache=True, fragment_cache=False, on_write=False)
+ self.generator.change_cache(storagerouter_ip=self.ip_2, vpool='myvpool01', fragment_cache=True, block_cache=False, on_read=False, on_write=True)
+ self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_2, vpool_name='myvpool01', sr_params={'sco_size': 8})
+
+ # add storagerouter2
+
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_3, hostname='ovs-node-2-1604')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Gravelines')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Roubaix', recovery=True)
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_3, name='Strasbourg', recovery=True)
+
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sda', roles=['WRITE', 'DTL'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sdb', roles=['DB'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_3, name='sdc', roles=['SCRUB'])
+
+ self.generator.add_vpool(storagerouter_ip=self.ip_3, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1)
+ self.generator.change_cache(storagerouter_ip=self.ip_3, vpool='myvpool01', fragment_cache=True, block_cache=True, on_write=False, on_read=True)
+ self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_3, vpool_name='myvpool01')
+
+ # add storagerouter 3
+
+ self.generator.add_storagerouter(storagerouter_ip=self.ip_4, hostname='ovs-node-3-1604')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Gravelines')
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Roubaix', recovery=True)
+ self.generator.add_domain_to_sr(storagerouter_ip=self.ip_4, name='Strasbourg', recovery=True)
+
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sda', roles=['WRITE', 'DTL'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdb', roles=['DB'])
+ self.generator.add_disk_to_sr(storagerouter_ip=self.ip_4, name='sdc', roles=['SCRUB'])
+ self.generator.add_vpool(storagerouter_ip=self.ip_4, vpool_name='myvpool01', backend_name='mybackend-global', preset_name='mypreset', storage_ip=self.ip_1)
+ self.generator.update_storagedriver_of_vpool(sr_ip=self.ip_4, vpool_name='myvpool01', sr_params={'global_read_buffer': 256})
+
+ expected_output = {u'ci': {u'cleanup': False,
+ u'config_manager': u'arakoon',
+ u'fail_on_failed_scenario': True,
+ u'grid_ip': u'127.0.0.1',
+ u'hypervisor': {u'password': u'rooter',
+ u'type': u'KVM',
+ u'user': u'root',
+ u'ip': u'127.0.0.1',
+ u'vms': {u'127.0.0.2': {u'name': u'ubuntu16.04-ovsnode01-setup1',
+ u'role': u'COMPUTE'},
+ u'127.0.0.3': {u'name': u'ubuntu16.04-ovsnode02-setup1',
+ u'role': u'VOLDRV'},
+ u'127.0.0.4': {u'name': u'ubuntu16.04-ovsnode03-setup1',
+ u'role': u'VOLDRV'}}},
+ u'local_hypervisor': {u'password': u'rooter',
+ u'type': u'KVM',
+ u'user': u'root'},
+ u'scenarios': True,
+ u'send_to_testrail': True,
+ u'setup': True,
+ u'user': {u'api': {u'password': u'admin', u'username': u'admin'},
+ u'shell': {u'password': u'rooter', u'username': u'root'}},
+ u'version': u'andes'},
+ u'scenarios': [u'ALL'],
+ u'setup': {
+ u'backends': [{u'domains': {u'domain_guids': [u'Roubaix']},
+ u'name': u'mybackend',
+ u'osds': {u'127.0.0.2': {u'sde': 2, u'sdf': 2},
+ u'127.0.0.3': {u'sde': 2, u'sdf': 2},
+ u'127.0.0.4': {u'sde': 2, u'sdf': 2}},
+ u'presets': [{u'compression': u'snappy',
+ u'encryption': u'none',
+ u'fragment_size': 2097152,
+ u'name': u'mypreset',
+ u'policies': [[1, 2, 2, 1]]}],
+ u'scaling': u'LOCAL'},
+ {u'domains': {u'domain_guids': [u'Gravelines']},
+ u'name': u'mybackend02',
+ u'osds': {u'127.0.0.2': {u'sdg': 2},
+ u'127.0.0.3': {u'sdg': 2},
+ u'127.0.0.4': {u'sdg': 2}},
+ u'presets': [{u'compression': u'snappy',
+ u'encryption': u'none',
+ u'fragment_size': 2097152,
+ u'name': u'mypreset',
+ u'policies': [[1, 2, 2, 1]]}],
+ u'scaling': u'LOCAL'},
+ {u'domains': {u'domain_guids': [u'Roubaix', u'Gravelines', u'Strasbourg']},
+ u'name': u'mybackend-global',
+ u'osds': {u'mybackend': u'mypreset', u'mybackend02': u'mypreset'},
+ u'presets': [{u'compression': u'snappy',
+ u'encryption': u'none',
+ u'fragment_size': 2097152,
+ u'name': u'mypreset',
+ u'policies': [[1, 2, 2, 1]]}],
+ u'scaling': u'GLOBAL'}],
+
+ u'domains': [u'Roubaix', u'Gravelines', u'Strasbourg'],
+ u'storagerouters': {u'127.0.0.2': {u'disks': {u'sda': {u'roles': [u'WRITE',
+ u'DTL']},
+ u'sdb': {u'roles': [u'DB']},
+ u'sdc': {u'roles': [u'SCRUB']}},
+ u'domains': {u'domain_guids': [u'Roubaix'],
+ u'recovery_domain_guids': [u'Gravelines', u'Strasbourg']},
+ u'hostname': u'ovs-node-1-1604',
+ u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global',
+ u'block_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': True, u'cache_on_write': False}},
+ u'fragment_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': False, u'cache_on_write': True}},
+ u'preset': u'mypreset',
+ u'proxies': 1,
+ u'storage_ip': u'127.0.0.1',
+ u'storagedriver': {u'cluster_size': 4,
+ u'dtl_mode': u'sync',
+ u'dtl_transport': u'tcp',
+ u'global_write_buffer': 128,
+ u'global_read_buffer': 128,
+ u'deduplication': "non_dedupe",
+ u'strategy': "none",
+ u'sco_size': 8,
+ u'volume_write_buffer': 512}}}},
+ u'127.0.0.3': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']},
+ u'sdb': {u'roles': [u'DB']},
+ u'sdc': {u'roles': [u'SCRUB']}},
+ u'domains': {u'domain_guids': [u'Gravelines'],
+ u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']},
+ u'hostname': u'ovs-node-2-1604',
+ u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global',
+ u'block_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': True, u'cache_on_write': False}},
+ u'fragment_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': True, u'cache_on_write': False}},
+ u'preset': u'mypreset',
+ u'proxies': 1,
+ u'storage_ip': u'127.0.0.1',
+ u'storagedriver': {u'cluster_size': 4,
+ u'dtl_mode': u'sync',
+ u'dtl_transport': u'tcp',
+ u'global_write_buffer': 128,
+ u'global_read_buffer': 128,
+ u'deduplication': "non_dedupe",
+ u'strategy': "none",
+ u'sco_size': 4,
+ u'volume_write_buffer': 512}}}},
+ u'127.0.0.4': {u'disks': {u'sda': {u'roles': [u'WRITE', u'DTL']},
+ u'sdb': {u'roles': [u'DB']},
+ u'sdc': {u'roles': [u'SCRUB']}},
+ u'domains': {u'domain_guids': [u'Gravelines'],
+ u'recovery_domain_guids': [u'Roubaix', u'Strasbourg']},
+ u'hostname': u'ovs-node-3-1604',
+ u'vpools': {u'myvpool01': {u'backend_name': u'mybackend-global',
+ u'block_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': False, u'cache_on_write': False}},
+ u'fragment_cache': {u'location': u'disk',
+ u'strategy': {u'cache_on_read': False, u'cache_on_write': False}},
+ u'preset': u'mypreset',
+ u'proxies': 1,
+ u'storage_ip': u'127.0.0.1',
+ u'storagedriver': {u'cluster_size': 4,
+ u'dtl_mode': u'sync',
+ u'dtl_transport': u'tcp',
+ u'global_write_buffer': 128,
+ u'global_read_buffer': 256,
+ u'deduplication': "non_dedupe",
+ u'strategy': "none",
+ u'sco_size': 4,
+ u'volume_write_buffer': 512}}}}}
+ }
+ }
+
+ self.assertDictEqual(self.generator.config['ci'], expected_output[u'ci'])
+ self.assertEqual(self.generator.config['setup']['domains'], expected_output['setup'][u'domains'])
+ self.assertDictEqual(self.generator.config['setup']['storagerouters'], expected_output['setup'][u'storagerouters'])
+ self.assertEqual(self.generator.config['setup']['backends'], expected_output['setup'][u'backends'])
+
+ self.assertDictEqual(self.generator.config, expected_output)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/helpers/thread.py b/helpers/thread.py
index 612c2d9..6fe2a47 100644
--- a/helpers/thread.py
+++ b/helpers/thread.py
@@ -25,7 +25,8 @@ class ThreadHelper(object):
@staticmethod
def start_thread_with_event(target, name, args=(), kwargs=None):
"""
- Starts a thread and an event to it
+ Starts a thread and attaches an event to it.
+ The passed target function needs to accept a parameter 'event', which will contain the stop Event object
:param target: target - usually a method
:type target: object
:param name: name of the thread
@@ -37,9 +38,11 @@ def start_thread_with_event(target, name, args=(), kwargs=None):
"""
if kwargs is None:
kwargs = {}
+ if 'event' in kwargs:
+ raise ValueError('event is a reserved keyword of this function')
ThreadHelper.LOGGER.info('Starting thread with target {0}'.format(target))
event = threading.Event()
- args = args + (event,)
+ kwargs['event'] = event
thread = threading.Thread(target=target, args=tuple(args), kwargs=kwargs)
thread.setName(str(name))
thread.setDaemon(True)
diff --git a/helpers/vdisk.py b/helpers/vdisk.py
index 81dd640..6ecee1f 100644
--- a/helpers/vdisk.py
+++ b/helpers/vdisk.py
@@ -13,14 +13,16 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
+
from ovs.dal.hybrids.vdisk import VDisk
from ovs.dal.lists.vdisklist import VDiskList
from ovs.dal.lists.vpoollist import VPoolList
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.exceptions import VPoolNotFoundError, VDiskNotFoundError
-class VDiskHelper(object):
+class VDiskHelper(CIConstants):
"""
vDiskHelper class
"""
@@ -105,8 +107,9 @@ def get_snapshot_by_guid(snapshot_guid, vdisk_name, vpool_name):
raise RuntimeError("Did not find snapshot with guid `{0}` on vdisk `{1}` on vpool `{2}`"
.format(snapshot_guid, vdisk_name, vpool_name))
- @staticmethod
- def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIMEOUT):
+
+ @classmethod
+ def get_config_params(cls, vdisk_name, vpool_name, timeout=GET_CONFIG_PARAMS_TIMEOUT, *args, **kwargs):
"""
Fetch the config parameters of a vDisk
@@ -115,8 +118,6 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM
:type vdisk_name: str
:param vpool_name: name of a existing vpool
:type vpool_name: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:return: a dict with config parameters, e.g.
@@ -133,8 +134,8 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM
"""
vdisk = VDiskHelper.get_vdisk_by_name(vdisk_name=vdisk_name, vpool_name=vpool_name)
- task_guid = api.get(api='/vdisks/{0}/get_config_params'.format(vdisk.guid))
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_guid = cls.api.get(api='/vdisks/{0}/get_config_params'.format(vdisk.guid))
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Setting config vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
@@ -144,23 +145,22 @@ def get_config_params(vdisk_name, vpool_name, api, timeout=GET_CONFIG_PARAMS_TIM
VDiskHelper.LOGGER.info("Setting config vDisk `{0}` should have succeeded".format(vdisk_name))
return task_result[1]
- @staticmethod
- def scrub_vdisk(vdisk_guid, api, timeout=15 * 60, wait=True):
+
+ @classmethod
+ def scrub_vdisk(cls, vdisk_guid, timeout=15 * 60, wait=True, *args, **kwargs):
"""
Scrub a specific vdisk
:param vdisk_guid: guid of the vdisk to scrub
:type vdisk_guid: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param wait: wait for task to finish or not
:type wait: bool
:return:
"""
- task_guid = api.post(api='/vdisks/{0}/scrub/'.format(vdisk_guid), data={})
+ task_guid = cls.api.post(api='/vdisks/{0}/scrub/'.format(vdisk_guid), data={})
if wait is True:
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Scrubbing vDisk `{0}` has failed with error {1}".format(vdisk_guid, task_result[1])
VDiskHelper.LOGGER.error(error_msg)
diff --git a/packaging/debian/debian/openvstorage-automation-lib.postinst b/packaging/debian/debian/openvstorage-automation-lib.postinst
new file mode 100644
index 0000000..3e82486
--- /dev/null
+++ b/packaging/debian/debian/openvstorage-automation-lib.postinst
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+touch /opt/OpenvStorage/ci/__init__.py
diff --git a/packaging/settings.json b/packaging/settings.json
index 8939cda..595a226 100644
--- a/packaging/settings.json
+++ b/packaging/settings.json
@@ -5,6 +5,6 @@
"source_contents": "--transform 's,^,{0}-{1}/,' helpers remove setup validate __init__.py *.txt",
"version": {
"major": 0,
- "minor": 4
+ "minor": 5
}
}
diff --git a/remove/backend.py b/remove/backend.py
index 05d1808..01bcb0a 100644
--- a/remove/backend.py
+++ b/remove/backend.py
@@ -16,26 +16,28 @@
from ovs.extensions.generic.logger import Logger
from ..helpers.albanode import AlbaNodeHelper
from ..helpers.backend import BackendHelper
+from ..helpers.ci_constants import CIConstants
from ..validate.decorators import required_backend, required_preset
-class BackendRemover(object):
+class BackendRemover(CIConstants):
LOGGER = Logger("remove-ci_backend_remover")
REMOVE_ASD_TIMEOUT = 60
REMOVE_DISK_TIMEOUT = 60
REMOVE_BACKEND_TIMEOUT = 60
REMOVE_PRESET_TIMEOUT = 60
+ UNLINK_BACKEND_TIMEOUT = 60
def __init__(self):
pass
- @staticmethod
- def remove_claimed_disk(api):
+ @classmethod
+ def remove_claimed_disk(cls):
pass
- @staticmethod
- def remove_asds(albabackend_name, target, disks, api):
+ @classmethod
+ def remove_asds(cls, albabackend_name, target, disks, *args, **kwargs):
"""
Remove all asds from a backend
@@ -43,8 +45,6 @@ def remove_asds(albabackend_name, target, disks, api):
:type target: str
:param disks: dict with diskname as key and amount of osds as value
:type disks: dict
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param albabackend_name: Name of the AlbaBackend to configure
:type albabackend_name: str
:return: preset_name
@@ -53,9 +53,9 @@ def remove_asds(albabackend_name, target, disks, api):
albabackend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
# target is a node
- node_mapping = AlbaNodeHelper._map_alba_nodes(api)
+ node_mapping = AlbaNodeHelper._map_alba_nodes()
- local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name, api=api)
+ local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name)
for disk, amount_of_osds in disks.iteritems():
disk_object = AlbaNodeHelper.get_disk_by_ip(ip=target, diskname=disk)
# Get the name of the disk out of the path, only expecting one with ata-
@@ -64,15 +64,13 @@ def remove_asds(albabackend_name, target, disks, api):
# Check if the alba_node_id has the disk
if disk_path in local_stack['local_stack'][alba_node_id]:
# Remove asds
- if disk_path in local_stack['local_stack'][alba_node_id]:
- for asd_id, asd_info in local_stack['local_stack'][alba_node_id][disk_path]['asds'].iteritems():
- BackendRemover.LOGGER.info('Removing asd {0} for disk {1}'.format(asd_id, local_stack['local_stack'][alba_node_id][disk_path]['guid']))
- asd_safety = BackendHelper.get_asd_safety(albabackend_guid=albabackend_guid, asd_id=asd_id, api=api)
- BackendRemover._remove_asd(alba_node_guid=alba_node_guid, asd_id=asd_id, asd_safety=asd_safety, api=api)
+ for asd_id, asd_info in local_stack['local_stack'][alba_node_id][disk_path]['osds'].iteritems():
+ BackendRemover.LOGGER.info('Removing asd {0} for disk {1}'.format(asd_id, disk_path))
+ asd_safety = BackendHelper.get_asd_safety(albabackend_guid=albabackend_guid, asd_id=asd_id)
+ BackendRemover._remove_asd(alba_node_guid=alba_node_guid, asd_id=asd_id, asd_safety=asd_safety)
# Restarting iteration to avoid too many local stack calls:
- local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name,
- api=api)
+ local_stack = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name)
for disk, amount_of_osds in disks.iteritems():
disk_object = AlbaNodeHelper.get_disk_by_ip(ip=target, diskname=disk)
# Get the name of the disk out of the path, only expecting one with ata-
@@ -82,10 +80,10 @@ def remove_asds(albabackend_name, target, disks, api):
if disk_path in local_stack['local_stack'][alba_node_id]:
# Initialize disk:
BackendRemover.LOGGER.info('Removing {0}.'.format(disk_path))
- BackendRemover._remove_disk(alba_node_guid=alba_node_guid, diskname=disk_path, api=api)
+ BackendRemover._remove_disk(alba_node_guid=alba_node_guid, diskname=disk_path)
- @staticmethod
- def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIMEOUT):
+ @classmethod
+ def _remove_asd(cls, alba_node_guid, asd_id, asd_safety, timeout=REMOVE_ASD_TIMEOUT, *args, **kwargs):
"""
Remove a asd from a backend
@@ -95,8 +93,6 @@ def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIME
:type asd_id: str
:param asd_safety:
:type asd_safety: dict
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: max. time to wait for a task to complete
:type timeout: int
:return:
@@ -106,19 +102,19 @@ def _remove_asd(alba_node_guid, asd_id, asd_safety, api, timeout=REMOVE_ASD_TIME
'asd_id': asd_id,
'safety': asd_safety
}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/nodes/{0}/reset_asd/'.format(alba_node_guid),
data=data
)
- result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if result[0] is False:
error_msg = "Removal of ASD '{0}; failed with {1}".format(asd_id, result[1])
BackendRemover.LOGGER.error(error_msg)
raise RuntimeError(error_msg)
return result[0]
- @staticmethod
- def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT):
+ @classmethod
+ def _remove_disk(cls, alba_node_guid, diskname, timeout=REMOVE_DISK_TIMEOUT, *args, **kwargs):
"""
Removes a an initiliazed disk from the model
@@ -126,8 +122,6 @@ def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT):
:type alba_node_guid: str
:param diskname: name of the disk
:type diskname: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: max. time to wait for the task to complete
:type timeout: int
:return:
@@ -135,27 +129,25 @@ def _remove_disk(alba_node_guid, diskname, api, timeout=REMOVE_DISK_TIMEOUT):
data = {
'disk': diskname,
}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/nodes/{0}/remove_disk/'.format(alba_node_guid),
data=data
)
- result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if result[0] is False:
errormsg = "Removal of ASD '{0}' failed with '{1}'".format(diskname, result[1])
BackendRemover.LOGGER.error(errormsg)
raise RuntimeError(errormsg)
return result[0]
- @staticmethod
+ @classmethod
@required_backend
- def remove_backend(albabackend_name, api, timeout=REMOVE_BACKEND_TIMEOUT):
+ def remove_backend(cls, albabackend_name, timeout=REMOVE_BACKEND_TIMEOUT, *args, **kwargs):
"""
Removes a alba backend from the ovs cluster
:param albabackend_name: the name of a existing alba backend
:type albabackend_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: max. time to wait for a task to complete
:type timeout: int
:return: task was succesfull or not
@@ -163,20 +155,19 @@ def remove_backend(albabackend_name, api, timeout=REMOVE_BACKEND_TIMEOUT):
"""
alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
- task_guid = api.delete(api='/alba/backends/{0}'.format(alba_backend_guid))
-
- result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_guid = cls.api.delete(api='/alba/backends/{0}'.format(alba_backend_guid))
+ result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if result[0] is False:
errormsg = "Removal of backend '{0}' failed with '{1}'".format(albabackend_name, result[1])
BackendRemover.LOGGER.error(errormsg)
raise RuntimeError(errormsg)
return result[0]
- @staticmethod
+ @classmethod
@required_preset
@required_backend
- def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIMEOUT):
+ def remove_preset(cls, preset_name, albabackend_name, timeout=REMOVE_PRESET_TIMEOUT, *args, **kwargs):
"""
Removes a alba backend from the ovs cluster
@@ -184,8 +175,6 @@ def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIME
:type preset_name: str
:param albabackend_name: name of the albabackend
:type albabackend_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: max. time to wait for a task to complete
:type timeout: int
:return: task was succesfull or not
@@ -194,12 +183,48 @@ def remove_preset(preset_name, albabackend_name, api, timeout=REMOVE_PRESET_TIME
alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
data = {"name": preset_name}
- task_guid = api.post(api='/alba/backends/{0}/delete_preset'.format(alba_backend_guid), data=data)
+ task_guid = cls.api.post(api='/alba/backends/{0}/delete_preset'.format(alba_backend_guid), data=data)
- result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if result[0] is False:
errormsg = "Removal of preset '{0}' for backend '{1}' failed with '{2}'".format(preset_name, albabackend_name, result[1])
BackendRemover.LOGGER.error(errormsg)
raise RuntimeError(errormsg)
return result[0]
+
+
+ @classmethod
+ #@required_backend
+ def unlink_backend(cls, globalbackend_name, albabackend_name, timeout=UNLINK_BACKEND_TIMEOUT, *args, **kwargs):
+ """
+        Unlink a LOCAL backend from a GLOBAL backend
+
+ :param globalbackend_name: name of a GLOBAL alba backend
+ :type globalbackend_name: str
+ :param albabackend_name: name of a backend to unlink
+ :type albabackend_name: str
+ :param timeout: timeout counter in seconds
+ :type timeout: int
+ :return:
+ """
+ data = {
+ "linked_guid": BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
+ }
+
+ task_guid = cls.api.post(
+ api='/alba/backends/{0}/unlink_alba_backends'
+ .format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)),
+ data=data
+ )
+
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
+ if not task_result[0]:
+ error_msg = "Unlinking backend `{0}` from global backend `{1}` has failed with error '{2}'".format(
+ albabackend_name, globalbackend_name, task_result[1])
+ BackendRemover.LOGGER.error(error_msg)
+ raise RuntimeError(error_msg)
+ else:
+ BackendRemover.LOGGER.info("Unlinking backend `{0}` from global backend `{1}` should have succeeded"
+ .format(albabackend_name, globalbackend_name))
+ return task_result[0]
diff --git a/remove/roles.py b/remove/roles.py
index e7f9c7e..16cdb02 100644
--- a/remove/roles.py
+++ b/remove/roles.py
@@ -13,85 +13,98 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
+
from subprocess import check_output
+from ovs.extensions.generic.system import System
from ovs.extensions.generic.logger import Logger
+from ovs.extensions.generic.sshclient import SSHClient
+from ..helpers.ci_constants import CIConstants
from ..helpers.fstab import FstabHelper
from ..helpers.storagerouter import StoragerouterHelper
from ..setup.roles import RoleSetup
-class RoleRemover(object):
+class RoleRemover(CIConstants):
LOGGER = Logger("remove-ci_role_remover")
CONFIGURE_DISK_TIMEOUT = 300
@staticmethod
- def _umount(mountpoint):
+ def _umount(mountpoint, client=None):
"""
Unmount the given partition
:param mountpoint: Location where the mountpoint is mounted
:type mountpoint: str
:return:
"""
+ if client is None:
+ client = SSHClient(System.get_my_storagerouter(), username='root')
try:
- check_output('umount {0}'.format(mountpoint), shell=True)
+ client.run(['umount', mountpoint])
except Exception:
RoleRemover.LOGGER.exception('Unable to umount mountpoint {0}'.format(mountpoint))
raise RuntimeError('Could not unmount {0}'.format(mountpoint))
@staticmethod
- def _remove_filesystem(device, alias_part_label):
+ def _remove_filesystem(device, alias_part_label, client=None):
"""
:param alias_part_label: eg /dev/disk/by-partlabel/ata-QEMU_HARDDISK_QM00011
:type alias_part_label: str
:return:
"""
+ if client is None:
+ client = SSHClient(System.get_my_storagerouter(), username='root')
try:
partition_cmd = "udevadm info --name={0} | awk -F '=' '/ID_PART_ENTRY_NUMBER/{{print $NF}}'".format(alias_part_label)
- partition_number = check_output(partition_cmd, shell=True)
- format_cmd = 'parted {0} rm {1}'.format(device, partition_number)
- check_output(format_cmd, shell=True)
+ partition_number = client.run(partition_cmd, allow_insecure=True)
+ if partition_number:
+ format_cmd = 'parted {0} rm {1}'.format(device, partition_number)
+ client.run(format_cmd.split())
except Exception:
RoleRemover.LOGGER.exception('Unable to remove filesystem of {0}'.format(alias_part_label))
raise RuntimeError('Could not remove filesystem of {0}'.format(alias_part_label))
- @staticmethod
- def remove_role(ip, diskname, api):
- allowed_roles = ['WRITE', 'READ', 'SCRUB', 'DB']
- RoleRemover.LOGGER.info("Starting removal of disk roles.")
-
+ @classmethod
+ def remove_role(cls, storagerouter_ip, diskname, *args, **kwargs):
+ allowed_roles = ['WRITE', 'DTL', 'SCRUB', 'DB']
+ cls.LOGGER.info("Starting removal of disk roles.")
# Fetch information
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(ip)
- disk = StoragerouterHelper.get_disk_by_ip(ip, diskname)
+
+ storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip=storagerouter_ip)
+ disk = StoragerouterHelper.get_disk_by_name(guid=storagerouter.guid, diskname=diskname)
# Check if there are any partitions on the disk, if so check if there is enough space
+ client = SSHClient(storagerouter, username='root')
+
if len(disk.partitions) > 0:
for partition in disk.partitions:
# Remove all partitions that have roles
if set(partition.roles).issubset(allowed_roles) and len(partition.roles) > 0:
- RoleRemover.LOGGER.info("Removing {0} from partition {1} on disk {2}".format(partition.roles, partition.guid, diskname))
- RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid,
+ cls.LOGGER.info("Removing {0} from partition {1} on disk {2}".format(partition.roles, partition.guid, diskname))
+ RoleSetup.configure_disk(storagerouter_guid=storagerouter.guid,
disk_guid=disk.guid,
offset=partition.offset,
size=disk.size,
roles=[],
- api=api,
partition_guid=partition.guid)
- # Unmount partition
- RoleRemover.LOGGER.info("Umounting disk {2}".format(partition.roles, partition.guid, diskname))
- RoleRemover._umount(partition.mountpoint)
+
+
+ cls._umount(partition.mountpoint, client=client)
# Remove from fstab
- RoleRemover.LOGGER.info("Removing {0} from fstab".format(partition.mountpoint, partition.guid, diskname))
- FstabHelper().remove_by_mountpoint(partition.mountpoint)
+
+ cls.LOGGER.info("Removing {0} from fstab".format(partition.mountpoint, partition.guid, diskname))
+ FstabHelper(client=client).remove_by_mountpoint(partition.mountpoint,client)
# Remove filesystem
- RoleRemover.LOGGER.info("Removing filesystem on partition {0} on disk {1}".format(partition.guid, diskname))
+ cls.LOGGER.info("Removing filesystem on partition {0} on disk {1}".format(partition.guid, diskname))
alias = partition.aliases[0]
device = '/dev/{0}'.format(diskname)
- RoleRemover._remove_filesystem(device, alias)
+ cls._remove_filesystem(device, alias,client=client)
# Remove partition from model
- RoleRemover.LOGGER.info("Removing partition {0} on disk {1} from model".format(partition.guid, diskname))
+ cls.LOGGER.info("Removing partition {0} on disk {1} from model".format(partition.guid, diskname))
partition.delete()
else:
- RoleRemover.LOGGER.info("Found no roles on partition {1} on disk {2}".format(partition.roles, partition.guid, diskname))
+ print 'Found no roles on partition'
+ RoleRemover.LOGGER.info("{1} on disk {2}".format(partition.roles, partition.guid, diskname))
else:
+ print 'found no partition'
RoleRemover.LOGGER.info("Found no partition on the disk.")
diff --git a/remove/vdisk.py b/remove/vdisk.py
index fc7f962..0749814 100644
--- a/remove/vdisk.py
+++ b/remove/vdisk.py
@@ -15,11 +15,12 @@
# but WITHOUT ANY WARRANTY of any kind.
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.vdisk import VDiskHelper
from ..validate.decorators import required_vtemplate
-class VDiskRemover(object):
+class VDiskRemover(CIConstants):
LOGGER = Logger("remove-ci_vdisk_remover")
REMOVE_SNAPSHOT_TIMEOUT = 60
@@ -29,13 +30,11 @@ class VDiskRemover(object):
def __init__(self):
pass
- @staticmethod
- def remove_vdisks_with_structure(vdisks, api, timeout=REMOVE_VDISK_TIMEOUT):
+ @classmethod
+ def remove_vdisks_with_structure(cls, vdisks, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs):
"""
Remove many vdisks at once. Will keep the parent structure in mind
:param vdisks: list of vdisks
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: seconds to elapse before raising a timeout error (for each volume)
:return:
"""
@@ -45,13 +44,13 @@ def remove_vdisks_with_structure(vdisks, api, timeout=REMOVE_VDISK_TIMEOUT):
continue
if len(vdisk.child_vdisks_guids) > 0:
for vdisk_child_guid in vdisk.child_vdisks_guids:
- VDiskRemover.remove_vdisk(vdisk_child_guid, api)
+ VDiskRemover.remove_vdisk(vdisk_child_guid)
removed_guids.append(vdisk_child_guid)
- VDiskRemover.remove_vdisk(vdisk.guid, api, timeout)
+ VDiskRemover.remove_vdisk(vdisk.guid, timeout)
removed_guids.append(vdisk.guid)
- @staticmethod
- def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_SNAPSHOT_TIMEOUT):
+ @classmethod
+ def remove_snapshot(cls, snapshot_guid, vdisk_name, vpool_name, timeout=REMOVE_SNAPSHOT_TIMEOUT, *args, **kwargs):
"""
Remove a existing snapshot from a existing vdisk
:param vdisk_name: location of a vdisk on a vpool
@@ -59,8 +58,6 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S
:type vdisk_name: str
:param snapshot_guid: unique guid of a snapshot
:type snapshot_guid: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param vpool_name: name of a existing vpool
@@ -71,11 +68,11 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S
vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid
data = {"snapshot_id": snapshot_guid}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/remove_snapshot/'.format(vdisk_guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Deleting snapshot `{0}` for vdisk `{1}` has failed".format(snapshot_guid, vdisk_name)
@@ -86,21 +83,19 @@ def remove_snapshot(snapshot_guid, vdisk_name, vpool_name, api, timeout=REMOVE_S
.format(snapshot_guid, vdisk_name))
return True
- @staticmethod
- def remove_vdisk(vdisk_guid, api, timeout=REMOVE_VDISK_TIMEOUT):
+ @classmethod
+ def remove_vdisk(cls, vdisk_guid, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs):
"""
Remove a vdisk from a vPool
:param vdisk_guid: guid of a existing vdisk
:type vdisk_guid: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:return: if success
:rtype: bool
"""
- task_guid = api.post(api='vdisks/{0}/delete'.format(vdisk_guid))
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_guid = cls.api.post(api='vdisks/{0}/delete'.format(vdisk_guid))
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Deleting vDisk `{0}` has failed".format(vdisk_guid)
VDiskRemover.LOGGER.error(error_msg)
@@ -109,43 +104,39 @@ def remove_vdisk(vdisk_guid, api, timeout=REMOVE_VDISK_TIMEOUT):
VDiskRemover.LOGGER.info("Deleting vDisk `{0}` should have succeeded".format(vdisk_guid))
return True
- @staticmethod
- def remove_vdisk_by_name(vdisk_name, vpool_name, api, timeout=REMOVE_VDISK_TIMEOUT):
+ @classmethod
+ def remove_vdisk_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VDISK_TIMEOUT, *args, **kwargs):
"""
Remove a vdisk from a vPool
:param vdisk_name: name of a existing vdisk (e.g. test.raw)
:type vdisk_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param vpool_name: name of a existing vpool
:type vpool_name: str
:return: if success
:rtype: bool
"""
vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid
- return VDiskRemover.remove_vdisk(vdisk_guid, api, timeout)
+ return VDiskRemover.remove_vdisk(vdisk_guid, timeout)
- @staticmethod
+ @classmethod
@required_vtemplate
- def remove_vtemplate_by_name(vdisk_name, vpool_name, api, timeout=REMOVE_VTEMPLATE_TIMEOUT):
+ def remove_vtemplate_by_name(cls, vdisk_name, vpool_name, timeout=REMOVE_VTEMPLATE_TIMEOUT, *args, **kwargs):
"""
Remove a vTemplate from a cluster
:param vdisk_name: name of a existing vdisk (e.g. test.raw)
:type vdisk_name: str
:param vpool_name: name of a existing vpool
:type vpool_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:return: if success
:rtype: bool
"""
vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name).guid
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/delete_vtemplate/'.format(vdisk_guid)
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Deleting vTemplate `{0}` has failed".format(vdisk_name)
diff --git a/remove/vpool.py b/remove/vpool.py
index 471d1b9..1c03ba7 100644
--- a/remove/vpool.py
+++ b/remove/vpool.py
@@ -15,25 +15,24 @@
# but WITHOUT ANY WARRANTY of any kind.
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.storagerouter import StoragerouterHelper
from ..helpers.vpool import VPoolHelper
-class VPoolRemover(object):
+class VPoolRemover(CIConstants):
LOGGER = Logger("remove-ci_vpool_remover")
REMOVE_VPOOL_TIMEOUT = 500
- @staticmethod
- def remove_vpool(vpool_name, storagerouter_ip, api, timeout=REMOVE_VPOOL_TIMEOUT):
+ @classmethod
+ def remove_vpool(cls, vpool_name, storagerouter_ip, timeout=REMOVE_VPOOL_TIMEOUT, *args, **kwargs):
"""
Removes a existing vpool from a storagerouter
:param vpool_name: the name of a existing vpool
:type vpool_name: str
:param storagerouter_ip: the ip address of a existing storagerouter
:type storagerouter_ip: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: max. time to wait for a task to complete
:type timeout: int
:return: None
@@ -42,9 +41,8 @@ def remove_vpool(vpool_name, storagerouter_ip, api, timeout=REMOVE_VPOOL_TIMEOUT
vpool_guid = VPoolHelper.get_vpool_by_name(vpool_name).guid
storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
data = {"storagerouter_guid": storagerouter_guid}
- task_guid = api.post(api='/vpools/{0}/shrink_vpool/'.format(vpool_guid), data=data)
-
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_guid = cls.api.post(api='/vpools/{0}/shrink_vpool/'.format(vpool_guid), data=data)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Deleting vPool `{0}` on storagerouter `{1}` has failed with error {2}".format(vpool_name, storagerouter_ip, task_result[1])
diff --git a/setup/arakoon.py b/setup/arakoon.py
index b990d67..52fe0cc 100644
--- a/setup/arakoon.py
+++ b/setup/arakoon.py
@@ -21,6 +21,7 @@
from ovs.lib.alba import AlbaController
from ..helpers.backend import BackendHelper
from ..validate.decorators import required_backend, required_arakoon_cluster
+from ..validate.backend import BackendValidation
class ArakoonSetup(object):
@@ -159,3 +160,44 @@ def checkup_nsm_hosts(albabackend_name, amount):
"""
alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
return AlbaController.nsm_checkup(backend_guid=alba_backend_guid, min_nsms=int(amount))
+
+
+ @staticmethod
+ def setup_external_arakoons(backend):
+ """
+        Set up external arakoons for a backend
+
+ :param backend: all backend details
+ :type backend: dict
+ :return: mapped external arakoons
+ :rtype: dict
+ """
+
+        # if backend does not exist, deploy the external arakoons
+ if not BackendValidation.check_backend(backend_name=backend['name']):
+ external_arakoon_mapping = {}
+ for ip, arakoons in backend['external_arakoon'].iteritems():
+ for arakoon_name, arakoon_settings in arakoons.iteritems():
+ # check if we already created one or not
+ if arakoon_name not in external_arakoon_mapping:
+ # if not created yet, create one and map it
+ external_arakoon_mapping[arakoon_name] = {}
+ external_arakoon_mapping[arakoon_name]['master'] = ip
+ external_arakoon_mapping[arakoon_name]['all'] = [ip]
+ ArakoonSetup.add_arakoon(cluster_name=arakoon_name, storagerouter_ip=ip,
+ cluster_basedir=arakoon_settings['base_dir'],
+ service_type=arakoon_settings['type'])
+ else:
+ # if created, extend it and map it
+ external_arakoon_mapping[arakoon_name]['all'].append(ip)
+ ArakoonSetup.extend_arakoon(cluster_name=arakoon_name,
+ master_storagerouter_ip=external_arakoon_mapping[arakoon_name]['master'],
+ storagerouter_ip=ip,
+ cluster_basedir=arakoon_settings['base_dir'],
+ service_type=arakoon_settings['type'],
+ clustered_nodes=external_arakoon_mapping[arakoon_name]['all'])
+ return external_arakoon_mapping
+ else:
+ ArakoonSetup.LOGGER.info("Skipping external arakoon creation because backend `{0}` already exists"
+ .format(backend['name']))
+ return
\ No newline at end of file
diff --git a/setup/backend.py b/setup/backend.py
index 6c9f294..318b633 100644
--- a/setup/backend.py
+++ b/setup/backend.py
@@ -17,11 +17,12 @@
from ovs.extensions.generic.logger import Logger
from ..helpers.albanode import AlbaNodeHelper
from ..helpers.backend import BackendHelper
+from ..helpers.ci_constants import CIConstants
from ..validate.decorators import required_roles, required_backend, required_preset, check_backend, check_preset, \
check_linked_backend, filter_osds
-class BackendSetup(object):
+class BackendSetup(CIConstants):
LOGGER = Logger("setup-ci_backend_setup")
LOCAL_STACK_SYNC = 30
@@ -32,16 +33,15 @@ class BackendSetup(object):
CLAIM_ASD_TIMEOUT = 60
LINK_BACKEND_TIMEOUT = 60
MAX_BACKEND_TRIES = 20
-
MAX_CLAIM_RETRIES = 5
def __init__(self):
pass
- @staticmethod
+ @classmethod
@check_backend
@required_roles(['DB'])
- def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES):
+ def add_backend(cls, backend_name, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max_tries=MAX_BACKEND_TRIES, *args, **kwargs):
"""
Add a new backend
:param backend_name: Name of the Backend to add
@@ -50,8 +50,6 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max
:type scaling: str
:return: backend_name
:rtype: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: timeout between tries
:type timeout: int
:param max_tries: amount of max. tries to check if a backend has been successfully created
@@ -60,7 +58,7 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max
:rtype: bool
"""
# ADD_BACKEND
- backend = api.post(
+ backend = cls.api.post(
api='backends',
data={
'name': backend_name,
@@ -70,7 +68,7 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max
)
# ADD_ALBABACKEND
- api.post(api='alba/backends', data={'backend_guid': backend['guid'], 'scaling': scaling})
+ cls.api.post(api='alba/backends', data={'backend_guid': backend['guid'], 'scaling': scaling})
# CHECK_STATUS until done
backend_running_status = "RUNNING"
@@ -96,10 +94,10 @@ def add_backend(backend_name, api, scaling='LOCAL', timeout=BACKEND_TIMEOUT, max
.format(backend_name, scaling, BackendHelper.get_backend_status_by_name(backend_name)))
return False
- @staticmethod
+ @classmethod
@check_preset
@required_backend
- def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT):
+ def add_preset(cls, albabackend_name, preset_details, timeout=ADD_PRESET_TIMEOUT, *args, **kwargs):
"""
Add a new preset
:param albabackend_name: albabackend name (e.g. 'mybackend')
@@ -117,8 +115,6 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT
"fragment_size": 2097152
}
:type preset_details: dict
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: amount of max time that preset may take to be added
:type timeout: int
:return: success or not
@@ -132,12 +128,12 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT
'fragment_size': preset_details['fragment_size']}
# ADD_PRESET
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/backends/{0}/add_preset'.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)),
data=preset
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Preset `{0}` has failed to create on backend `{1}`".format(preset_details['name'], albabackend_name)
@@ -147,10 +143,10 @@ def add_preset(albabackend_name, preset_details, api, timeout=ADD_PRESET_TIMEOUT
BackendSetup.LOGGER.info("Creation of preset `{0}` should have succeeded on backend `{1}`".format(preset_details['name'], albabackend_name))
return True
- @staticmethod
+ @classmethod
@required_preset
@required_backend
- def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_PRESET_TIMEOUT):
+ def update_preset(cls, albabackend_name, preset_name, policies, timeout=UPDATE_PRESET_TIMEOUT, *args, **kwargs):
"""
Update a existing preset
:param albabackend_name: albabackend name
@@ -159,20 +155,18 @@ def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_P
:type preset_name: str
:param policies: policies to be updated (e.g. [[1,1,2,2], [1,1,1,2]])
:type policies: list > list
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: amount of max time that preset may take to be added
:type timeout: int
:return: success or not
:rtype: bool
"""
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/backends/{0}/update_preset'
.format(BackendHelper.get_alba_backend_guid_by_name(albabackend_name)),
data={"name": preset_name, "policies": policies}
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Preset `{0}` has failed to update with policies `{1}` on backend `{2}`"\
@@ -184,30 +178,28 @@ def update_preset(albabackend_name, preset_name, policies, api, timeout=UPDATE_P
.format(preset_name, albabackend_name))
return True
- @staticmethod
+ @classmethod
@required_backend
@filter_osds
- def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRIES):
+ def add_asds(cls, target, disks, albabackend_name, claim_retries=MAX_CLAIM_RETRIES, *args, **kwargs):
"""
Initialize and claim a new asds on given disks
:param target: target to add asds too
:type target: str
:param disks: dict with diskname as key and amount of osds as value
:type disks: dict
- :param claim_retries: Maximum amount of claim retries
- :type claim_retries: int
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param albabackend_name: Name of the AlbaBackend to configure
:type albabackend_name: str
+ :param claim_retries: Maximum amount of claim retries
+ :type claim_retries: int
:return: preset_name
:rtype: str
"""
- BackendSetup._discover_and_register_nodes(api) # Make sure all backends are registered
- node_mapping = AlbaNodeHelper._map_alba_nodes(api) # target is a node
+ BackendSetup._discover_and_register_nodes() # Make sure all backends are registered
+ node_mapping = AlbaNodeHelper._map_alba_nodes() # target is a node
alba_backend_guid = BackendHelper.get_alba_backend_guid_by_name(albabackend_name)
- backend_info = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name, api=api)
+ backend_info = BackendHelper.get_backend_local_stack(albabackend_name=albabackend_name)
local_stack = backend_info['local_stack']
node_slot_information = {}
for disk, amount_of_osds in disks.iteritems():
@@ -224,10 +216,11 @@ def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRI
'slot_id': slot_id,
'osd_type': 'ASD',
'alba_backend_guid': alba_backend_guid})
+
node_slot_information[alba_node_guid] = slot_information
for alba_node_guid, slot_information in node_slot_information.iteritems():
BackendSetup.LOGGER.info('Posting {0} for alba_node_guid {1}'.format(slot_information, alba_node_guid))
- BackendSetup._fill_slots(alba_node_guid=alba_node_guid, slot_information=slot_information, api=api)
+ BackendSetup._fill_slots(alba_node_guid=alba_node_guid, slot_information=slot_information)
# Local stack should sync with the new disks
BackendSetup.LOGGER.info('Sleeping for {0} seconds to let local stack sync.'.format(BackendSetup.LOCAL_STACK_SYNC))
@@ -266,14 +259,12 @@ def add_asds(target, disks, albabackend_name, api, claim_retries=MAX_CLAIM_RETRI
node_osds_to_claim[alba_node_guid] = osds_to_claim
for alba_node_guid, osds_to_claim in node_osds_to_claim.iteritems():
BackendSetup.LOGGER.info('Posting {0} for alba_node_guid {1}'.format(osds_to_claim, alba_node_guid))
- BackendSetup._claim_osds(alba_backend_name=albabackend_name, alba_node_guid=alba_node_guid, osds=osds_to_claim, api=api)
+ BackendSetup._claim_osds(alba_backend_name=albabackend_name, alba_node_guid=alba_node_guid, osds=osds_to_claim)
- @staticmethod
- def _discover_and_register_nodes(api):
+ @classmethod
+ def _discover_and_register_nodes(cls, *args, **kwargs):
"""
Will discover and register potential nodes to the DAL/Alba
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
"""
options = {
@@ -281,61 +272,54 @@ def _discover_and_register_nodes(api):
'contents': 'node_id,_relations',
'discover': True
}
- response = api.get(
+ response = cls.api.get(
api='alba/nodes',
params=options
)
for node in response['data']:
- api.post(
+ cls.api.post(
api='alba/nodes',
data={'node_id': {'node_id': node['node_id']}}
)
- @staticmethod
- def _map_alba_nodes(api):
+ @classmethod
+ def _map_alba_nodes(cls, *args, **kwargs):
"""
Will map the alba_node_id with its guid counterpart and return the map dict
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
"""
mapping = {}
options = {
'contents': 'node_id,_relations',
}
- response = api.get(
+ response = cls.api.get(
api='alba/nodes',
params=options
)
for node in response['data']:
- print node
mapping[node['node_id']] = node['guid']
return mapping
- @staticmethod
- def get_backend_local_stack(alba_backend_name, api):
+ @classmethod
+ def get_backend_local_stack(cls, alba_backend_name, *args, **kwargs):
"""
Fetches the local stack property of a backend
:param alba_backend_name: backend name
:type alba_backend_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
"""
options = {
'contents': 'local_stack',
}
- return api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)),
+ return cls.api.get(api='/alba/backends/{0}/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)),
params={'queryparams': options}
)
- @staticmethod
- def _fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_TIMEOUT):
+ @classmethod
+ def _fill_slots(cls, alba_node_guid, slot_information, timeout=INITIALIZE_DISK_TIMEOUT, *args, **kwargs):
"""
Initializes a disk to create osds
:param alba_node_guid:
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: timeout counter in seconds
:param slot_information: list of slots to fill
:type slot_information: list
@@ -343,12 +327,11 @@ def _fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_T
:return:
"""
data = {'slot_information': slot_information}
-
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/nodes/{0}/fill_slots/'.format(alba_node_guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Initialize disk `{0}` for alba node `{1}` has failed".format(data, alba_node_guid)
BackendSetup.LOGGER.error(error_msg)
@@ -357,8 +340,8 @@ def _fill_slots(alba_node_guid, api, slot_information, timeout=INITIALIZE_DISK_T
BackendSetup.LOGGER.info("Successfully initialized '{0}'".format(data))
return task_result[0]
- @staticmethod
- def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_TIMEOUT):
+ @classmethod
+ def _claim_osds(cls, alba_backend_name, alba_node_guid, osds, timeout=CLAIM_ASD_TIMEOUT, *args, **kwargs):
"""
Claims a asd
:param alba_backend_name: backend name
@@ -367,19 +350,17 @@ def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_
:type alba_node_guid: str
:param osds: list of osds to claim
:type osds: list
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: timeout counter in seconds
:type timeout: int
:return:
"""
data = {'alba_node_guid': alba_node_guid,
'osds': osds}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/backends/{0}/add_osds/'.format(BackendHelper.get_alba_backend_guid_by_name(alba_backend_name)),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Claim ASD `{0}` for alba backend `{1}` has failed with error '{2}'".format(osds, alba_backend_name, task_result[1])
@@ -389,11 +370,11 @@ def _claim_osds(alba_backend_name, alba_node_guid, osds, api, timeout=CLAIM_ASD_
BackendSetup.LOGGER.info("Succesfully claimed '{0}'".format(osds))
return task_result[0]
- @staticmethod
+ @classmethod
@required_preset
@required_backend
@check_linked_backend
- def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout=LINK_BACKEND_TIMEOUT):
+ def link_backend(cls, albabackend_name, globalbackend_name, preset_name, timeout=LINK_BACKEND_TIMEOUT, *args, **kwargs):
"""
Link a LOCAL backend to a GLOBAL backend
@@ -403,12 +384,11 @@ def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout
:type globalbackend_name: str
:param preset_name: name of the preset available in the LOCAL alba backend
:type preset_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: timeout counter in seconds
:type timeout: int
:return:
"""
+
local_albabackend = BackendHelper.get_albabackend_by_name(albabackend_name)
data = {
@@ -427,14 +407,13 @@ def link_backend(albabackend_name, globalbackend_name, preset_name, api, timeout
}
}
}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/alba/backends/{0}/link_alba_backends'
.format(BackendHelper.get_alba_backend_guid_by_name(globalbackend_name)),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
-
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Linking backend `{0}` to global backend `{1}` has failed with error '{2}'".format(
albabackend_name, globalbackend_name, task_result[1])
diff --git a/setup/domain.py b/setup/domain.py
index d4e2f7d..5a43317 100644
--- a/setup/domain.py
+++ b/setup/domain.py
@@ -16,34 +16,32 @@
from ovs.extensions.generic.logger import Logger
from ..helpers.backend import BackendHelper
+from ..helpers.ci_constants import CIConstants
from ..helpers.domain import DomainHelper
from ..helpers.storagerouter import StoragerouterHelper
from ..validate.decorators import required_backend
-class DomainSetup(object):
+class DomainSetup(CIConstants):
LOGGER = Logger("setup-ci_domain_setup")
def __init__(self):
pass
- @staticmethod
- def add_domain(domain_name, api):
+ @classmethod
+ def add_domain(cls, domain_name, *args, **kwargs):
"""
Add a new (recovery) domain to the cluster
- :param domain_name: name of a new domain
+ :param domain_name: name of a new domain to add
:type domain_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:return:
"""
-
# check if domain already exists
if not DomainHelper.get_domain_by_name(domain_name):
data = {"name": domain_name}
- api.post(
+ cls.api.post(
api='/domains/',
data=data
)
@@ -58,22 +56,18 @@ def add_domain(domain_name, api):
else:
return
- @staticmethod
- def link_domains_to_storagerouter(domain_details, storagerouter_ip, api):
+ @classmethod
+ def link_domains_to_storagerouter(cls, domain_details, storagerouter_ip, *args, **kwargs):
"""
Link a existing domain(s) and/or recovery (domains) to a storagerouter
-
:param domain_details: domain details of a storagerouter
example: {"domain_guids":["Gravelines"],"recovery_domain_guids":["Roubaix", "Strasbourg"]}
:type domain_details: dict
:param storagerouter_ip: ip address of a storage router
:type storagerouter_ip: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:return:
"""
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)
domain_guids = []
recovery_domain_guids = []
# translate domain names to domain guids
@@ -86,25 +80,27 @@ def link_domains_to_storagerouter(domain_details, storagerouter_ip, api):
data = {"domain_guids": domain_guids,
"recovery_domain_guids": recovery_domain_guids}
- api.post(
+
+ storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
+ cls.api.post(
api='/storagerouters/{0}/set_domains/'.format(storagerouter_guid),
data=data
)
- storagerouter = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip)
+ storagerouter = StoragerouterHelper.get_storagerouter_by_guid(storagerouter_guid=storagerouter_guid)
if len(set(domain_guids) - set(storagerouter.regular_domains)) != 0 or \
len(set(recovery_domain_guids) - set(storagerouter.recovery_domains)) != 0:
- error_msg = "Failed to link (recovery) domain(s) to storagerouter `{0}`".format(storagerouter_ip)
+ error_msg = "Failed to link (recovery) domain(s) to storagerouter `{0}`".format(storagerouter_guid)
DomainSetup.LOGGER.error(error_msg)
raise RuntimeError(error_msg)
else:
DomainSetup.LOGGER.info("Successfully linked domain (recovery) domain(s) to storagerouter `{0}`"
- .format(storagerouter_ip))
+ .format(storagerouter_guid))
return
- @staticmethod
+ @classmethod
@required_backend
- def link_domains_to_backend(domain_details, albabackend_name, api):
+ def link_domains_to_backend(cls, domain_details, albabackend_name, *args, **kwargs):
"""
Link a existing domain(s) and/or recovery (domains) to a storagerouter
@@ -113,9 +109,6 @@ def link_domains_to_backend(domain_details, albabackend_name, api):
:type domain_details: dict
:param albabackend_name: name of a existing alba backend
:type albabackend_name: str
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
- :return:
"""
albabackend_guid = BackendHelper.get_backend_guid_by_name(albabackend_name)
@@ -126,7 +119,7 @@ def link_domains_to_backend(domain_details, albabackend_name, api):
domain_guids.append(DomainHelper.get_domainguid_by_name(domain_name))
data = {"domain_guids": domain_guids}
- api.post(
+ cls.api.post(
api='/backends/{0}/set_domains/'.format(albabackend_guid),
data=data
)
diff --git a/setup/proxy.py b/setup/proxy.py
index 1da982c..7098c61 100644
--- a/setup/proxy.py
+++ b/setup/proxy.py
@@ -17,6 +17,7 @@
from ovs.dal.lists.vpoollist import VPoolList
from ovs.extensions.generic.configuration import Configuration
from ovs.extensions.generic.logger import Logger
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
from ovs.extensions.services.servicefactory import ServiceFactory
from ovs.lib.helpers.toolbox import Toolbox
from ovs.dal.hybrids.service import Service
@@ -48,7 +49,7 @@ def configure_proxy(backend_name, proxy_configuration):
        faulty_keys = [key for key in proxy_configuration.keys() if key not in ProxySetup.PARAMS]
        if len(faulty_keys) > 0:
            raise ValueError('{0} are unsupported keys for proxy configuration.'.format(', '.join(faulty_keys)))
-        Toolbox.verify_required_params(ProxySetup.PARAMS, proxy_configuration)
+        ExtensionsToolbox.verify_required_params(ProxySetup.PARAMS, proxy_configuration)
        vpools = VPoolList.get_vpools()
        service_manager = ServiceFactory.get_manager()
        with open('/root/old_proxies', 'w') as backup_file:
diff --git a/setup/roles.py b/setup/roles.py
index e56b8ea..b07ae44 100644
--- a/setup/roles.py
+++ b/setup/roles.py
@@ -15,11 +15,12 @@
# but WITHOUT ANY WARRANTY of any kind.
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.storagerouter import StoragerouterHelper
from ..validate.decorators import check_role_on_disk
-class RoleSetup(object):
+class RoleSetup(CIConstants):
LOGGER = Logger("setup-ci_role_setup")
CONFIGURE_DISK_TIMEOUT = 300
@@ -29,28 +30,27 @@ class RoleSetup(object):
def __init__(self):
pass
- @staticmethod
+ @classmethod
@check_role_on_disk
- def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION_SIZE):
+ def add_disk_role(cls, storagerouter_ip, diskname, roles, min_size=MIN_PARTITION_SIZE, *args, **kwargs):
+
"""
Partition and adds roles to a disk
- :param storagerouter_ip: ip address of a existing storagerouter
+        :param storagerouter_ip: ip address of an existing storagerouter
:type storagerouter_ip: str
:param diskname: shortname of a disk (e.g. sdb)
:type diskname: str
:param roles: list of roles you want to add to the disk
:type roles: list
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param min_size: minimum total_partition_size that is required to allocate the disk role
:type min_size: int
:return:
"""
# Fetch information
- storagerouter_guid = StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)
- disk = StoragerouterHelper.get_disk_by_ip(storagerouter_ip, diskname)
+ storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
+ disk = StoragerouterHelper.get_disk_by_name(storagerouter_guid, diskname)
# Check if there are any partitions on the disk, if so check if there is enough space
unused_partitions = []
if len(disk.partitions) > 0:
@@ -60,7 +60,7 @@ def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION
# Check if the partition is in use - could possibly write role on unused partition
if partition.mountpoint is None:
# Means no output -> partition not mounted
- # @Todo support partitions that are not sequentional
+ # @Todo support partitions that are not sequential
unused_partitions.append(partition)
# Elect biggest unused partition as potential candidate
@@ -72,21 +72,21 @@ def add_disk_role(storagerouter_ip, diskname, roles, api, min_size=MIN_PARTITION
if ((disk.size-total_partition_size)/1024**3) > min_size:
# disk is still large enough, let the partitioning begin and apply some roles!
RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid, disk_guid=disk.guid, offset=total_partition_size + 1,
- size=(disk.size-total_partition_size)-1, roles=roles, api=api)
+ size=(disk.size-total_partition_size)-1, roles=roles)
elif biggest_unused_partition is not None and (biggest_unused_partition.size/1024**3) > min_size:
RoleSetup.configure_disk(storagerouter_guid=storagerouter_guid, disk_guid=disk.guid, offset=biggest_unused_partition.offset,
- size=biggest_unused_partition.size, roles=roles, api=api, partition_guid=biggest_unused_partition.guid)
+ size=biggest_unused_partition.size, roles=roles, partition_guid=biggest_unused_partition.guid)
else:
# disk is too small
raise RuntimeError("Disk `{0}` on node `{1}` is too small for role(s) `{2}`, min. total_partition_size is `{3}`"
- .format(diskname, storagerouter_ip, roles, min_size))
+ .format(diskname, storagerouter_guid, roles, min_size))
else:
# there are no partitions on the disk, go nuke it!
- RoleSetup.configure_disk(storagerouter_guid, disk.guid, 0, disk.size, roles, api)
+ RoleSetup.configure_disk(storagerouter_guid, disk.guid, 0, disk.size, roles)
- @staticmethod
- def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, partition_guid=None,
- timeout=CONFIGURE_DISK_TIMEOUT):
+ @classmethod
+ def configure_disk(cls, storagerouter_guid, disk_guid, offset, size, roles, partition_guid=None,
+ timeout=CONFIGURE_DISK_TIMEOUT, *args, **kwargs):
"""
Partition a disk and add roles to it
@@ -100,8 +100,6 @@ def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, part
:type size: int
:param roles: roles to add to a partition (e.g. ['DB', 'WRITE'])
:type roles: list
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param partition_guid: guid of the partition
@@ -116,12 +114,11 @@ def configure_disk(storagerouter_guid, disk_guid, offset, size, roles, api, part
'roles': roles,
'partition_guid': partition_guid
}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/storagerouters/{0}/configure_disk/'.format(storagerouter_guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
-
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Adjusting disk `{0}` has failed on storagerouter `{1}` with error '{2}'" \
.format(disk_guid, storagerouter_guid, task_result[1])
diff --git a/setup/storagedriver.py b/setup/storagedriver.py
index 44fae1c..635948f 100644
--- a/setup/storagedriver.py
+++ b/setup/storagedriver.py
@@ -13,8 +13,8 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
-from ovs.lib.helpers.toolbox import Toolbox
from ovs.extensions.generic.logger import Logger
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
from ..helpers.storagedriver import StoragedriverHelper
from ..helpers.vpool import VPoolHelper
@@ -32,7 +32,7 @@ def change_config(vpool_name, vpool_details, storagerouter_ip, *args, **kwargs):
# Settings volumedriver
storagedriver_config = vpool_details.get('storagedriver')
if storagedriver_config is not None:
- Toolbox.verify_required_params(StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
+ ExtensionsToolbox.verify_required_params(StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
StoragedriverSetup.LOGGER.info('Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'.format(vpool_name, storagerouter_ip))
vpool = VPoolHelper.get_vpool_by_name(vpool_name)
storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == storagerouter_ip][0]
diff --git a/setup/vdisk.py b/setup/vdisk.py
index 95bd28d..b4c7030 100644
--- a/setup/vdisk.py
+++ b/setup/vdisk.py
@@ -15,13 +15,14 @@
# but WITHOUT ANY WARRANTY of any kind.
from ovs.extensions.generic.logger import Logger
+from ..helpers.ci_constants import CIConstants
from ..helpers.storagerouter import StoragerouterHelper
from ..helpers.vdisk import VDiskHelper
from ..helpers.vpool import VPoolHelper
from ..validate.decorators import required_vdisk, required_snapshot, required_vtemplate
-class VDiskSetup(object):
+class VDiskSetup(CIConstants):
LOGGER = Logger("setup-ci_vdisk_setup")
CREATE_SNAPSHOT_TIMEOUT = 60
@@ -34,9 +35,9 @@ class VDiskSetup(object):
def __init__(self):
pass
- @staticmethod
- def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True, sticky=True,
- timeout=CREATE_SNAPSHOT_TIMEOUT):
+ @classmethod
+ def create_snapshot(cls, snapshot_name, vdisk_name, vpool_name, consistent=True, sticky=True,
+ timeout=CREATE_SNAPSHOT_TIMEOUT, *args, **kwargs):
"""
Create a new snapshot for a vdisk
@@ -49,8 +50,6 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True,
:type consistent: bool
:param sticky: let this snapshot stick forever?
:type sticky: bool
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param vpool_name: name of a existing vpool
@@ -66,11 +65,11 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True,
'sticky': sticky
}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/create_snapshot/'.format(vdisk_guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Creating snapshot `{0}` for vdisk `{1}` on vPool `{2}` has failed"\
@@ -82,8 +81,8 @@ def create_snapshot(snapshot_name, vdisk_name, vpool_name, api, consistent=True,
.format(snapshot_name, vdisk_name, vpool_name))
return task_result[1]
- @staticmethod
- def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CREATE_VDISK_TIMEOUT):
+ @classmethod
+ def create_vdisk(cls, vdisk_name, vpool_name, size, storagerouter_ip, timeout=CREATE_VDISK_TIMEOUT, *args, **kwargs):
"""
Create a new vDisk on a certain vPool/storagerouter
:param vdisk_name: location of a vdisk on a vpool (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw )
@@ -94,8 +93,6 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR
:type size: int
:param storagerouter_ip: ip address of a existing storagerouter
:type storagerouter_ip: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param vpool_name: name of a existing vpool
@@ -117,11 +114,11 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR
"vpool_guid": vpool_guid,
"storagerouter_guid": storagerouter_guid}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/',
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Creating vdisk `{0}` on vPool `{1}` on storagerouter `{2}` has failed with error {3}"\
@@ -133,24 +130,23 @@ def create_vdisk(vdisk_name, vpool_name, size, storagerouter_ip, api, timeout=CR
.format(vdisk_name, vpool_name, storagerouter_ip))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vdisk
- def move_vdisk(vdisk_guid, target_storagerouter_guid, api, timeout=60):
+ def move_vdisk(cls, vdisk_guid, target_storagerouter_guid, timeout=60, *args, **kwargs):
"""
Moves a vdisk
:param vdisk_guid: guid of the vdisk
:param target_storagerouter_guid: guid of the storuagerouter to move to
- :param api: instance of ovs client
:param timeout: timeout in seconds
:return:
"""
data = {"target_storagerouter_guid": target_storagerouter_guid}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/move/'.format(vdisk_guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Moving vdisk {0} to {1} has failed with {2}.".format(
@@ -162,11 +158,11 @@ def move_vdisk(vdisk_guid, target_storagerouter_guid, api, timeout=60):
"Vdisk {0} should have been moved to {1}.".format(vdisk_guid, target_storagerouter_guid))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vdisk
@required_snapshot
- def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api, snapshot_id=None,
- timeout=CREATE_CLONE_TIMEOUT):
+ def create_clone(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, snapshot_id=None,
+ timeout=CREATE_CLONE_TIMEOUT, *args, **kwargs):
"""
Create a new vDisk on a certain vPool/storagerouter
:param vdisk_name: location of a vdisk on a vpool (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw )
@@ -179,8 +175,6 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api,
:type storagerouter_ip: str
:param snapshot_id: GUID of a existing snapshot (DEFAULT=None -> will create new snapshot)
:type snapshot_id: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:param vpool_name: name of a existing vpool
@@ -210,11 +204,11 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api,
"storagerouter_guid": storagerouter_guid,
"snapshot_id": snapshot_id}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/clone'.format(vdisk.guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Creating clone `{0}` with snapshot_id `{4}` on vPool `{1}` on storagerouter `{2}` " \
@@ -228,9 +222,9 @@ def create_clone(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api,
snapshot_id))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vdisk
- def set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT):
+ def set_vdisk_as_template(cls, vdisk_name, vpool_name, timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT, *args, **kwargs):
"""
Create a new vDisk on a certain vPool/storagerouter
Set a existing vDisk as vTemplate
@@ -240,17 +234,15 @@ def set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMP
:type vdisk_name: str
:param vpool_name: name of a existing vpool
:type vpool_name: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
"""
# fetch the requirements
vdisk = VDiskHelper.get_vdisk_by_name(vdisk_name, vpool_name)
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/set_as_template'.format(vdisk.guid)
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Creating vTemplate `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
@@ -260,10 +252,10 @@ def set_vdisk_as_template(vdisk_name, vpool_name, api, timeout=SET_VDISK_AS_TEMP
VDiskSetup.LOGGER.info("Creating vTemplate `{0}` should have succeeded".format(vdisk_name))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vtemplate
- def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip, api,
- timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT):
+ def create_from_template(cls, vdisk_name, vpool_name, new_vdisk_name, storagerouter_ip,
+ timeout=SET_VDISK_AS_TEMPLATE_TIMEOUT, *args, **kwargs):
"""
Create a new vDisk on a certain vPool/storagerouter
Set a existing vDisk as vTemplate
@@ -275,8 +267,6 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i
:type new_vdisk_name: str
:param storagerouter_ip: ip address of a existing storagerouter where the clone will be deployed
:type storagerouter_ip: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:return: dict with info about the new vdisk {'vdisk_guid': new_vdisk.guid, 'name': new_vdisk.name, 'backingdevice': devicename}
:rtype: dict
@@ -294,11 +284,11 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i
data = {"name": official_new_vdisk_name,
"storagerouter_guid": storagerouter_guid}
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/create_from_template'.format(vdisk.guid),
data=data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Creating vTemplate `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
@@ -308,9 +298,9 @@ def create_from_template(vdisk_name, vpool_name, new_vdisk_name, storagerouter_i
VDiskSetup.LOGGER.info("Creating vTemplate `{0}` should have succeeded".format(vdisk_name))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vdisk
- def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLBACK_VDISK_TIMEOUT):
+ def rollback_to_snapshot(cls, vdisk_name, vpool_name, snapshot_id, timeout=ROLLBACK_VDISK_TIMEOUT, *args, **kwargs):
"""
Rollback a vdisk to a certain snapshot
@@ -321,8 +311,6 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB
:type vpool_name: str
:param snapshot_id: guid of a snapshot for the chosen vdisk
:type snapshot_id: str
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
"""
@@ -331,11 +319,11 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB
snapshot = VDiskHelper.get_snapshot_by_guid(snapshot_guid=snapshot_id, vdisk_name=vdisk_name,
vpool_name=vpool_name)
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/rollback'.format(vdisk_guid),
data={"timestamp": snapshot['timestamp']}
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Rollback vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
@@ -345,9 +333,9 @@ def rollback_to_snapshot(vdisk_name, vpool_name, snapshot_id, api, timeout=ROLLB
VDiskSetup.LOGGER.info("Rollback vDisk `{0}` should have succeeded".format(vdisk_name))
return task_result[1]
- @staticmethod
+ @classmethod
@required_vdisk
- def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VDISK_TIMEOUT):
+ def set_config_params(cls, vdisk_name, vpool_name, config, timeout=SET_CONFIG_VDISK_TIMEOUT, *args, **kwargs):
"""
Rollback a vdisk to a certain snapshot
@@ -366,8 +354,6 @@ def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VD
]
}
:type config: dict
- :param api: specify a valid api connection to the setup
- :type api: ci.helpers.api.OVSClient
:param timeout: time to wait for the task to complete
:type timeout: int
:rtype: dict
@@ -377,11 +363,11 @@ def set_config_params(vdisk_name, vpool_name, config, api, timeout=SET_CONFIG_VD
# fetch the requirements
vdisk_guid = VDiskHelper.get_vdisk_by_name(vdisk_name=vdisk_name, vpool_name=vpool_name).guid
- task_guid = api.post(
+ task_guid = cls.api.post(
api='/vdisks/{0}/set_config_params'.format(vdisk_guid),
data={"new_config_params": config}
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = "Setting config vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
diff --git a/setup/vpool.py b/setup/vpool.py
index 6366ddf..a926677 100644
--- a/setup/vpool.py
+++ b/setup/vpool.py
@@ -13,17 +13,19 @@
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
-from ovs.lib.generic import GenericController
-from ovs.lib.helpers.toolbox import Toolbox
+
from ovs.extensions.generic.logger import Logger
+from ovs_extensions.generic.toolbox import ExtensionsToolbox
+from ovs.lib.generic import GenericController
from ..helpers.backend import BackendHelper
-from ..helpers.storagedriver import StoragedriverHelper
+from ..helpers.ci_constants import CIConstants
from ..helpers.storagerouter import StoragerouterHelper
+from ..helpers.storagedriver import StoragedriverHelper
from ..helpers.vpool import VPoolHelper
from ..validate.decorators import required_roles, check_vpool
-class VPoolSetup(object):
+class VPoolSetup(CIConstants):
LOGGER = Logger('setup-ci_vpool_setup')
ADD_VPOOL_TIMEOUT = 500
@@ -36,10 +38,10 @@ class VPoolSetup(object):
def __init__(self):
pass
- @staticmethod
+ @classmethod
@check_vpool
@required_roles(REQUIRED_VPOOL_ROLES, 'LOCAL')
- def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2, timeout=ADD_VPOOL_TIMEOUT, *args, **kwargs):
+ def add_vpool(cls, vpool_name, vpool_details, storagerouter_ip, proxy_amount=2, timeout=ADD_VPOOL_TIMEOUT, *args, **kwargs):
"""
Adds a VPool to a storagerouter
@@ -49,8 +51,6 @@ def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2,
:type vpool_details: dict
:param timeout: specify a timeout
:type timeout: int
- :param api: specify a valid api connection to the setup
- :type api: helpers.api.OVSClient
:param storagerouter_ip: ip of the storagerouter to add the vpool too
:type storagerouter_ip: str
:param proxy_amount: amount of proxies for this vpool
@@ -110,13 +110,13 @@ def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2,
error_msg = 'Wrong `block_cache->location` in vPool configuration, it should be `disk` or `backend`'
VPoolSetup.LOGGER.error(error_msg)
raise RuntimeError(error_msg)
-
- task_guid = api.post(
+
+ task_guid = cls.api.post(
api='/storagerouters/{0}/add_vpool/'.format(
- StoragerouterHelper.get_storagerouter_guid_by_ip(storagerouter_ip)),
+ StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid),
data=api_data
)
- task_result = api.wait_for_task(task_id=task_guid, timeout=timeout)
+ task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)
if not task_result[0]:
error_msg = 'vPool {0} has failed to create on storagerouter {1} because: {2}'.format(vpool_name, storagerouter_ip, task_result[1])
VPoolSetup.LOGGER.error(error_msg)
@@ -127,7 +127,7 @@ def add_vpool(vpool_name, vpool_details, api, storagerouter_ip, proxy_amount=2,
# Settings volumedriver
storagedriver_config = vpool_details.get('storagedriver')
if storagedriver_config is not None:
- Toolbox.verify_required_params(VPoolSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
+ ExtensionsToolbox.verify_required_params(VPoolSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
VPoolSetup.LOGGER.info('Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'.format(vpool_name, storagerouter_ip))
vpool = VPoolHelper.get_vpool_by_name(vpool_name)
storagedriver = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == storagerouter_ip][0]
diff --git a/validate/backend.py b/validate/backend.py
index dcc1452..cffccc7 100644
--- a/validate/backend.py
+++ b/validate/backend.py
@@ -52,7 +52,7 @@ def check_preset_on_backend(preset_name, albabackend_name):
@staticmethod
def check_policies_on_preset(preset_name, albabackend_name, policies):
"""
- Check if a preset is available on a backend
+ Check if given policies match with the specified backend
:param preset_name: name of a preset
:type preset_name: str
@@ -126,16 +126,17 @@ def check_available_osds_on_asdmanager(ip, disks):
slot_map[disk_name] = fetched_disk
available_disks = {}
for disk, amount_asds in disks.iteritems():
+
# check if requested disk is present and available in fetched_disks
if disk not in slot_map:
- BackendValidation.LOGGER.error("Disk `{0}` was NOT found on node `{1}`!".format(ip, disk))
+ BackendValidation.LOGGER.error("Disk `{0}` was NOT found on node `{1}`!".format(disk, ip))
continue
if slot_map[disk]['available'] is False:
- BackendValidation.LOGGER.error("Disk `{0}` is NOT available on node `{1}`!".format(ip, disk))
+ BackendValidation.LOGGER.error("Disk `{0}` is NOT available on node `{1}`!".format(disk, ip))
continue
# add disk to available disks
available_disks[disk] = amount_asds
- BackendValidation.LOGGER.info("Disk `{0}` is available on node `{1}`!".format(ip, disk))
+ BackendValidation.LOGGER.info("Disk `{0}` is available on node `{1}`!".format(disk, ip))
BackendValidation.LOGGER.info("The following disks are available for use on `{0}`: {1}".format(ip, available_disks))
return available_disks
diff --git a/validate/roles.py b/validate/roles.py
index f37f59e..e1f8eeb 100644
--- a/validate/roles.py
+++ b/validate/roles.py
@@ -15,6 +15,7 @@
# but WITHOUT ANY WARRANTY of any kind.
from ovs.extensions.generic.logger import Logger
from ..helpers.disk import DiskHelper
+from ..helpers.storagerouter import StoragerouterHelper
class RoleValidation(object):
@@ -31,7 +32,7 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"):
:param roles: the required roles
:type roles: list
- :param storagerouter_ip: ip address of a storagerouter
    + :param storagerouter_ip: ip address of a storagerouter (resolved internally to its guid)
:type storagerouter_ip: str
:param location:
* GLOBAL: checks the whole cluster if certain roles are available
@@ -39,11 +40,12 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"):
:type location: str
:return: None
"""
-
# fetch availabe roles
if location == "LOCAL":
# LOCAL
- available_roles = DiskHelper.get_roles_from_disks(storagerouter_ip=storagerouter_ip)
+ storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
+
+ available_roles = DiskHelper.get_roles_from_disks(storagerouter_guid=storagerouter_guid)
else:
# GLOBAL
available_roles = DiskHelper.get_roles_from_disks()
@@ -62,7 +64,7 @@ def check_required_roles(roles, storagerouter_ip=None, location="GLOBAL"):
# append storagerouter_ip if searching on a LOCAL node
if location == "LOCAL":
- error_msg += " on storagerouter {0}".format(storagerouter_ip)
+ error_msg += " on storagerouter {0}".format(storagerouter_guid)
RoleValidation.LOGGER.error(error_msg)
raise RuntimeError(error_msg)
@@ -82,4 +84,5 @@ def check_role_on_disk(roles, storagerouter_ip, disk_name):
:return: if available on disk
:rtype: bool
"""
- return len(set(roles).difference(set(DiskHelper.get_roles_from_disk(storagerouter_ip, disk_name)))) == 0
+ storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(storagerouter_ip).guid
+ return len(set(roles).difference(set(DiskHelper.get_roles_from_disk(storagerouter_guid, disk_name)))) == 0