Dev: ui_sbd: Add new 'crm sbd' sublevel (jsc#PED-8256)
- Add 'crm sbd remove' command
liangxin1300 committed Jul 17, 2024
1 parent 276b813 commit 2f10c6e
Showing 5 changed files with 412 additions and 16 deletions.
7 changes: 6 additions & 1 deletion crmsh/bootstrap.py
@@ -2785,7 +2785,12 @@ def sync_file(path):
"""
Sync files between cluster nodes
"""
if _context.skip_csync2:
if _context:
skip_csync2 = _context.skip_csync2
else:
skip_csync2 = not ServiceManager().service_is_active(CSYNC2_SERVICE)

if skip_csync2:
utils.cluster_copy_file(path, nodes=_context.node_list_in_cluster, output=False)
else:
csync2_update(path)
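The fallback matters because sync_file() can now be reached outside the bootstrap flow, where the module-level _context was never initialized; previously that path crashed on _context.skip_csync2. A minimal usage sketch under that assumption (the synced path is illustrative):

    # Hypothetical non-bootstrap caller, e.g. the new 'crm sbd' sublevel
    # acting on a running cluster where bootstrap._context is None:
    from crmsh import bootstrap

    # sync_file() now decides between csync2 and a plain cluster copy itself
    bootstrap.sync_file("/etc/sysconfig/sbd")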
66 changes: 53 additions & 13 deletions crmsh/sbd.py
@@ -85,17 +85,31 @@ def _adjust_sbd_watchdog_timeout_with_diskless_and_qdevice(self):
             logger.warning("sbd_watchdog_timeout is set to {} for qdevice, it was {}".format(self.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE, self.sbd_watchdog_timeout))
             self.sbd_watchdog_timeout = self.SBD_WATCHDOG_TIMEOUT_DEFAULT_WITH_QDEVICE
 
+    @staticmethod
+    def get_sbd_device_metadata(dev, timeout_only=False) -> dict:
+        """
+        Extract metadata from sbd device header
+        """
+        out = sh.cluster_shell().get_stdout_or_raise_error("sbd -d {} dump".format(dev))
+        pattern = r"UUID\s+:\s+(\S+)|Timeout\s+\((\w+)\)\s+:\s+(\d+)"
+        matches = re.findall(pattern, out)
+        sbd_info = {}
+        for uuid, timeout_type, timeout_value in matches:
+            if uuid and not timeout_only:
+                sbd_info["uuid"] = uuid
+            elif timeout_type and timeout_value:
+                sbd_info[timeout_type] = int(timeout_value)
+        return sbd_info
+
     @staticmethod
     def get_sbd_msgwait(dev):
         """
         Get msgwait for sbd device
         """
-        out = sh.cluster_shell().get_stdout_or_raise_error("sbd -d {} dump".format(dev))
-        # Format like "Timeout (msgwait) : 30"
-        res = re.search("\(msgwait\)\s+:\s+(\d+)", out)
+        res = SBDTimeout.get_sbd_device_metadata(dev).get("msgwait")
         if not res:
-            raise ValueError("Cannot get sbd msgwait for {}".format(dev))
-        return int(res.group(1))
+            raise ValueError(f"Cannot get sbd msgwait for {dev}")
+        return res
 
     @staticmethod
     def get_sbd_watchdog_timeout():
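A standalone sketch of the parsing that get_sbd_device_metadata() performs, using the same regex against a sample 'sbd -d <dev> dump' transcript (the UUID and timeout values are illustrative):

    import re

    # Sample device-header dump as printed by `sbd -d /dev/sda1 dump`
    out = """
    UUID               : 8a33c66e-0bcb-4215-b1f4-c071d634ea19
    Timeout (watchdog) : 5
    Timeout (allocate) : 2
    Timeout (loop)     : 1
    Timeout (msgwait)  : 10
    """

    pattern = r"UUID\s+:\s+(\S+)|Timeout\s+\((\w+)\)\s+:\s+(\d+)"
    sbd_info = {}
    for uuid, timeout_type, timeout_value in re.findall(pattern, out):
        if uuid:
            sbd_info["uuid"] = uuid
        elif timeout_type and timeout_value:
            sbd_info[timeout_type] = int(timeout_value)

    print(sbd_info)
    # -> {'uuid': '8a33c66e-0bcb-4215-b1f4-c071d634ea19',
    #     'watchdog': 5, 'allocate': 2, 'loop': 1, 'msgwait': 10}

get_sbd_msgwait() above and _get_device_uuid() further down now just read single keys from this dict instead of each re-dumping the device header with an ad-hoc regex.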
@@ -269,6 +283,7 @@ class SBDManager(object):
     DISKLESS_CRM_CMD = "crm configure property stonith-enabled=true stonith-watchdog-timeout={} stonith-timeout={}"
     SBD_RA = "stonith:fence_sbd"
     SBD_RA_ID = "stonith-sbd"
+    SBD_DEVICE_MAX = 3
 
     def __init__(self, context):
         """
@@ -292,11 +307,10 @@ def _get_device_uuid(dev, node=None):
"""
Get UUID for specific device and node
"""
out = sh.cluster_shell().get_stdout_or_raise_error("sbd -d {} dump".format(dev), node)
res = re.search("UUID\s*:\s*(.*)\n", out)
res = SBDTimeout.get_sbd_device_metadata(dev).get("uuid")
if not res:
raise ValueError("Cannot find sbd device UUID for {}".format(dev))
return res.group(1)
return res

def _compare_device_uuid(self, dev, node_list):
"""
Expand All @@ -314,8 +328,8 @@ def _verify_sbd_device(self, dev_list, compare_node_list=[]):
"""
Verify sbd device
"""
if len(dev_list) > 3:
raise ValueError("Maximum number of SBD device is 3")
if len(dev_list) > self.SBD_DEVICE_MAX:
raise ValueError(f"Maximum number of SBD device is {self.SBD_DEVICE_MAX}")
for dev in dev_list:
if not utils.is_block_device(dev):
raise ValueError("{} doesn't look like a block device".format(dev))
@@ -568,16 +582,16 @@ def join_sbd(self, remote_user, peer_host):
         bootstrap.invoke("systemctl enable sbd.service")
 
     @classmethod
-    def verify_sbd_device(cls):
+    def verify_sbd_device(cls, device_list=[], compare_node_list=[]):
         """
         This classmethod is for verifying sbd device on a running cluster
         Raise ValueError for exceptions
         """
         inst = cls(bootstrap.Context())
-        dev_list = inst._get_sbd_device_from_config()
+        dev_list = device_list or inst._get_sbd_device_from_config()
         if not dev_list:
             raise ValueError("No sbd device configured")
-        inst._verify_sbd_device(dev_list, utils.list_cluster_nodes_except_me())
+        inst._verify_sbd_device(dev_list, compare_node_list)
 
     @classmethod
     def get_sbd_device_from_config(cls):
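The widened signature lets callers validate candidate devices before anything is written to the configuration, while a bare call keeps the old source of devices. A hedged sketch of both call styles (the device path is a placeholder; presumably this is how the new 'crm sbd' commands drive it):

    # Pre-flight check of a device that is about to be added, compared
    # against the other cluster nodes:
    SBDManager.verify_sbd_device(
        device_list=["/dev/disk/by-id/scsi-360014059021dd7b5fdc4f1ca0a4e4b21"],
        compare_node_list=utils.list_cluster_nodes_except_me(),
    )

    # Old-style call: devices come from the config; note the cross-node
    # UUID comparison now only happens if compare_node_list is passed in.
    SBDManager.verify_sbd_device()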
@@ -630,5 +644,31 @@ def clean_up_existing_sbd_resource():
     sbd_id_list = xmlutil.CrmMonXmlParser().get_resource_id_list_via_type(SBDManager.SBD_RA)
     if xmlutil.CrmMonXmlParser().is_resource_started(SBDManager.SBD_RA):
         for sbd_id in sbd_id_list:
             logger.info("Stop sbd resource %s", sbd_id)
             utils.ext_cmd("crm resource stop {}".format(sbd_id))
+    logger.info("Remove sbd resource '%s'", ';'.join(sbd_id_list))
+    utils.ext_cmd("crm configure delete {}".format(' '.join(sbd_id_list)))
+
+
+def disable_sbd_from_cluster():
+    '''
+    Disable SBD from cluster, the process includes:
+    - stop and remove sbd agent
+    - disable sbd.service
+    - adjust cluster attributes
+    - adjust related timeout values
+    '''
+    clean_up_existing_sbd_resource()
+
+    cluster_nodes = utils.list_cluster_nodes()
+    service_manager = ServiceManager()
+    for node in cluster_nodes:
+        if service_manager.service_is_enabled("sbd.service", node):
+            logger.info("Disable sbd.service on node %s", node)
+            service_manager.disable_service("sbd.service", node)
+
+    out = sh.cluster_shell().get_stdout_or_raise_error("stonith_admin -L")
+    res = re.search("([0-9]+) fence device[s]* found", out)
+    # after disable sbd.service, check if sbd is the last stonith device
+    if res and int(res.group(1)) <= 1:
+        utils.cleanup_stonith_related_properties()
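A standalone sketch of the final check above: stonith_admin prints a trailing count line, and if at most one fence device remains once the sbd agent is gone, the stonith-related cluster properties are cleaned up (the sample output is illustrative):

    import re

    # Sample `stonith_admin -L` output while only the sbd agent is registered
    out = " stonith-sbd\n1 fence device found\n"

    res = re.search(r"([0-9]+) fence device[s]* found", out)
    if res and int(res.group(1)) <= 1:
        # sbd was the last stonith device; properties such as
        # stonith-enabled / stonith-watchdog-timeout no longer apply
        print("cleanup stonith related properties")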
5 changes: 5 additions & 0 deletions crmsh/ui_root.py
@@ -33,6 +33,7 @@
 from . import ui_resource
 from . import ui_script
 from . import ui_site
+from . import ui_sbd
 
 
 class Root(command.UI):
@@ -150,6 +151,10 @@ def do_report(self, context, *args):
     def do_resource(self):
         pass
 
+    @command.level(ui_sbd.SBD)
+    def do_sbd(self):
+        pass
+
     @command.level(ui_script.Script)
     @command.help('''Cluster scripts
 Cluster scripts can perform cluster-wide configuration,
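The new sublevel class lives in crmsh/ui_sbd.py, one of the changed files not rendered on this page. For orientation, a minimal sketch of how a command.UI level with a 'remove' command is conventionally laid out in crmsh; the class body is an assumption, not the actual file:

    from . import command
    from . import sbd


    class SBD(command.UI):
        '''
        SBD management sublevel (hypothetical sketch; the real ui_sbd.SBD
        registered above is substantially larger)
        '''
        name = "sbd"

        def do_remove(self, context, *args):
            "usage: remove"
            # plausibly delegates to the new sbd.disable_sbd_from_cluster()
            sbd.disable_sbd_from_cluster()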
