From 2076381e3e266cd82dea4c579fd4a2a63bde8917 Mon Sep 17 00:00:00 2001
From: gkyratsas
Date: Mon, 1 Jul 2019 11:51:23 +0200
Subject: [PATCH] Fixed osds_out/osds_down bug + ceph GW reboot bug

---
 stress_test_ceph_vmware/cephops.py | 12 ++++++------
 stress_test_ceph_vmware/main.py    |  5 ++---
 2 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/stress_test_ceph_vmware/cephops.py b/stress_test_ceph_vmware/cephops.py
index 4fb2ca3..502e649 100644
--- a/stress_test_ceph_vmware/cephops.py
+++ b/stress_test_ceph_vmware/cephops.py
@@ -49,21 +49,21 @@ def unset_noup(self):
 
     def get_down_osds(self):
         data, _ = self.client.run_cmd('ceph osd tree -f json')
-        down_osds = []
+        self.down_osds = []
         for node_osd in ast.literal_eval(data)['nodes']:
             if node_osd.get('type') == 'osd' and 'osd' in node_osd.get('name'):
                 if node_osd.get('status') == 'down':
-                    down_osds.append(node_osd.get('id'))
-        return down_osds
+                    self.down_osds.append(node_osd.get('id'))
+        return self.down_osds
 
     def get_out_osds(self):
         data, _ = self.client.run_cmd('ceph osd tree -f json')
-        out_osds = []
+        self.out_osds = []
         for node_osd in ast.literal_eval(data)['nodes']:
             if node_osd.get('type') == 'osd' and 'osd' in node_osd.get('name'):
                 if node_osd.get('reweight') != 1.0:
-                    out_osds.append(node_osd.get('id'))
-        return out_osds
+                    self.out_osds.append(node_osd.get('id'))
+        return self.out_osds
 
     @property
     def max_down_osds(self):
diff --git a/stress_test_ceph_vmware/main.py b/stress_test_ceph_vmware/main.py
index 72f7c92..639ab29 100644
--- a/stress_test_ceph_vmware/main.py
+++ b/stress_test_ceph_vmware/main.py
@@ -30,8 +30,8 @@ def __init__(self):
         self.vmwareops = VMwareOps()
         self.ops.get_vm_list()
 
-        if len(self.gateways) < 2:
-            self.reboot_allowed = False
+        if len(self.gateways) >= 2:
+            self.reboot_allowed = True
 
         if self.force_reboot:
             self.reboot_allowed = True
@@ -103,7 +103,6 @@ def check_thresholds(self):
         while len(self.ops.vms) > self.max_vms:
             log.info(Fore.YELLOW + "VM count rose over the configured threshold. Removing VMs")
             self.destroy_vms(count=1)
-
         if self.cephops.osd_out_count > self.cephops.max_down_osds:
             log.info(Fore.YELLOW + "Down OSDs count dropped below the configured thereshold. Adding back in.")
             for osd_id in self.cephops.out_osds:
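
For readers skimming the diff: the patched getters walk the "nodes" array of
"ceph osd tree -f json" and collect OSD ids by status (down) or by reweight
(out), and after this change they store the result on the instance so that
later code such as check_thresholds can iterate self.cephops.out_osds. Below
is a minimal standalone sketch of that parsing logic; the sample JSON is
hypothetical and abbreviated, and the sketch uses json.loads where the module
itself uses ast.literal_eval.

import json

# Hypothetical, abbreviated sample of 'ceph osd tree -f json' output;
# a real cluster reports many more fields per node.
SAMPLE = '''
{"nodes": [
  {"id": -1, "name": "default", "type": "root"},
  {"id": 0, "name": "osd.0", "type": "osd", "status": "up",   "reweight": 1.0},
  {"id": 1, "name": "osd.1", "type": "osd", "status": "down", "reweight": 1.0},
  {"id": 2, "name": "osd.2", "type": "osd", "status": "up",   "reweight": 0.0}
]}
'''

def collect_osds(data):
    """Return (down_osds, out_osds) id lists, mirroring the patched getters."""
    down_osds, out_osds = [], []
    for node_osd in json.loads(data)['nodes']:
        # Only consider leaf OSD entries, not buckets such as roots or hosts.
        if node_osd.get('type') == 'osd' and 'osd' in node_osd.get('name', ''):
            if node_osd.get('status') == 'down':
                down_osds.append(node_osd.get('id'))
            if node_osd.get('reweight') != 1.0:
                out_osds.append(node_osd.get('id'))
    return down_osds, out_osds

if __name__ == '__main__':
    print(collect_osds(SAMPLE))  # ([1], [2])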