From 01799a1f0bd72216ee4a794b9f1d8123ded81fd7 Mon Sep 17 00:00:00 2001
From: ParveezBaig
Date: Fri, 23 Aug 2024 15:58:24 +0530
Subject: [PATCH] Refactor code

---
 .idea/misc.xml                                |   5 +-
 .idea/pxc-qa.iml                              |   3 +-
 .idea/workspace.xml                           | 139 +++--
 base_test.py                                  | 134 +++++
 config.ini                                    |   3 -
 config.py                                     |   3 -
 pstress/pstress-run.py                        |  45 +-
 suite/correctness/chaosmonkey-test.py         | 124 +---
 suite/correctness/cluster_interaction.py      | 209 ++-----
 suite/correctness/consistency_check.py        | 132 +---
 suite/correctness/crash_recovery.py           | 220 ++-----
 suite/galera_sr/galera_basic_sr_qa.py         |  97 +--
 suite/galera_sr/sr_procedure.sql              |   7 +-
 suite/galera_sr/thread_pool_qa.py             | 109 +---
 suite/loadtest/sysbench_load_test.py          |  76 +--
 suite/loadtest/sysbench_random_load_test.py   |  79 +--
 ...bench_wsrep_provider_option_random_test.py | 132 +---
 suite/random_qa/pstress_crash_recovery_qa.py  | 119 +---
 suite/random_qa/pstress_random_qa.py          |  95 +--
 suite/random_qa/pxc_util.py                   |  97 ---
 suite/random_qa/random_mysqld_option_test.py  | 126 ++--
 suite/replication/backup_replication.py       | 182 ++----
 suite/replication/gtid_replication.py         | 208 +------
 suite/replication/replication.py              | 261 +++-----
 suite/ssl/encryption_qa.py                    | 173 ++----
 suite/ssl/ssl_qa.py                           | 139 +----
 .../sysbench_customized_dataload_test.py      |  73 +--
 suite/sysbench_run/sysbench_oltp_test.py      |  83 +--
 suite/sysbench_run/sysbench_read_only_test.py |  78 +--
 suite/upgrade/pxc_replication_upgrade.py      | 327 +++-------
 suite/upgrade/pxc_upgrade.py                  | 414 +++----------
 suite/upgrade/pxc_upgrade_replacement.py      | 289 +--------
 test/test_startup.py                          |  30 +-
 util/create_cnf.py                            |  42 +-
 util/createsql.py                             |  25 +-
 util/data_generator.py                        |  11 +-
 util/db_connection.py                         | 261 +++++++-
 util/executesql.py                            |  73 +++
 util/prepared_statements.sql                  | 154 +----
 util/ps_startup.py                            | 216 ++++---
 util/pxc_startup.py                           | 476 ++++++++++++---
 util/pxc_util.py                              |  54 ++
 util/rqg_datagen.py                           |  59 +-
 util/sanity.py                                |  21 -
 util/sysbench_run.py                          | 540 ++++++++--------
 util/table_checksum.py                        | 136 ++---
 util/utility.py                               | 569 ++++++------------
 47 files changed, 2583 insertions(+), 4265 deletions(-)
 create mode 100644 base_test.py
 mode change 100644 => 100755 suite/galera_sr/galera_basic_sr_qa.py
 mode change 100644 => 100755 suite/galera_sr/thread_pool_qa.py
 mode change 100644 => 100755 suite/loadtest/sysbench_wsrep_provider_option_random_test.py
 delete mode 100644 suite/random_qa/pxc_util.py
 mode change 100644 => 100755 suite/ssl/encryption_qa.py
 mode change 100644 => 100755 suite/upgrade/pxc_replication_upgrade.py
 mode change 100644 => 100755 suite/upgrade/pxc_upgrade_replacement.py
 create mode 100644 util/executesql.py
 create mode 100644 util/pxc_util.py

diff --git a/.idea/misc.xml b/.idea/misc.xml
index 8e46f70..e2445a2 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -1,6 +1,9 @@
 (XML hunk body lost in extraction)

diff --git a/.idea/pxc-qa.iml b/.idea/pxc-qa.iml
index a20ba97..a3166ab 100644
--- a/.idea/pxc-qa.iml
+++ b/.idea/pxc-qa.iml
@@ -4,11 +4,10 @@
 (XML hunk body lost in extraction)
\ No newline at end of file

diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 31584f5..c13eb7a 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -1,12 +1,15 @@
 (XML hunk body lost in extraction; surviving fragments: { "associatedIndex": 5 } and
 C:\Users\ramesh\AppData\Roaming\Subversion)
@@ -118,13 +140,20 @@
 (XML hunk body lost in extraction)

diff --git a/base_test.py b/base_test.py
new file mode 100644
index 0000000..fc15f64
--- /dev/null
+++ b/base_test.py
@@ -0,0 +1,134 @@
+import argparse
+
+from config import WORKDIR, NODE, PT_BASEDIR, BASEDIR, SERVER, \
+    PXC_LOWER_BASE, PXC_UPPER_BASE
+from util import pxc_startup, ps_startup
+from util.utility import *
+
+workdir = WORKDIR
+pt_basedir = PT_BASEDIR
+server = SERVER
+
+# Read argument
+parser = argparse.ArgumentParser(prog='PXC test', usage='%(prog)s [options]')
+parser.add_argument('-e', '--encryption-run', action='store_true',
+                    help='This option will enable encryption options')
+parser.add_argument('-d', '--debug', action='store_true',
+                    help='This option will enable debug logging')
+
+args = parser.parse_args()
+encryption = 'NO'
+if args.encryption_run is True:
+    encryption = 'YES'
+
+debug = 'NO'
+if args.debug is True:
+    debug = 'YES'
+
+utility_cmd = Utility(debug)
+utility_cmd.check_python_version()
+version = utility_cmd.version_check(BASEDIR)
+lower_version = get_mysql_version(PXC_LOWER_BASE)
+upper_version = get_mysql_version(PXC_UPPER_BASE)
+low_version_num = utility_cmd.version_check(PXC_LOWER_BASE)
+high_version_num = utility_cmd.version_check(PXC_UPPER_BASE)
+db = "test"
+
+
+class BaseTest:
+    def __init__(self, number_of_nodes: int = int(NODE),
+                 wsrep_provider_options=None,
+                 my_extra=None,
+                 extra_config_file=None,
+                 init_extra=None,
+                 vers: Version = None,
+                 encrypt: bool = False,
+                 ssl: bool = False):
+        self.__number_of_nodes = number_of_nodes
+        self.__my_extra = my_extra
+        self.__wsrep_provider_options = wsrep_provider_options
+        self.__extra_config_file = extra_config_file
+        self.__init_extra = init_extra
+        self.__version = vers
+        self.encrypt = encrypt
+        self.ssl = ssl
+        self.pxc_nodes: list[DbConnection] = None
+        self.node1: DbConnection = None
+        self.node2: DbConnection = None
+        self.node3: DbConnection = None
+        self.ps_nodes: list[DbConnection] = None
+
+    def start_pxc(self, my_extra: str = None, custom_conf_settings: dict = None,
+                  terminate_on_startup_failure: bool = True):
+        if my_extra is not None:
+            my_extra_options = my_extra
+        else:
+            my_extra_options = self.__my_extra
+
+        # Start the PXC cluster for the test
+        server_startup = pxc_startup.StartCluster(self.__number_of_nodes, debug, self.__version)
+        server_startup.sanity_check()
+        if encryption == 'YES' or self.encrypt:
+            server_startup.create_config('encryption', self.__wsrep_provider_options,
+                                         custom_conf_settings=custom_conf_settings)
+        elif self.ssl:
+            server_startup.create_config('ssl', self.__wsrep_provider_options,
+                                         custom_conf_settings=custom_conf_settings)
+        else:
+            server_startup.create_config('none', self.__wsrep_provider_options,
+                                         custom_conf_settings=custom_conf_settings)
+        server_startup.initialize_cluster()
+        if self.__extra_config_file is not None:
+            server_startup.add_myextra_configuration(self.__extra_config_file)
+        self.pxc_nodes = server_startup.start_cluster(my_extra_options, terminate_on_startup_failure)
+        if len(self.pxc_nodes) == self.__number_of_nodes:
+            self.node1 = self.pxc_nodes[0]
+            self.node2 = self.pxc_nodes[1]
+            self.node3 = self.pxc_nodes[2]
+            self.node1.test_connection_check()
+        else:
+            print("Some problem while setting up cluster nodes. Not all nodes seem to be in a healthy state")
+            print("Number of nodes: " + str(len(self.pxc_nodes)))
+            if terminate_on_startup_failure:
+                exit(1)
+        if debug == 'YES':
+            for node in self.pxc_nodes:
+                print("node is " + node.get_socket())
+        # atexit.register(shutdown_nodes(self.node))
+
+    def start_ps(self, my_extra=None):
+        """ Start Percona Server. This method will
+            perform sanity checks for PS startup
+        """
+        # Start Percona Server for the test
+        if my_extra is not None:
+            my_extra_options = my_extra
+        else:
+            my_extra_options = self.__my_extra
+        server_startup = ps_startup.StartPerconaServer(self.__number_of_nodes, debug, self.__version)
+        server_startup.test_sanity_check()
+        if encryption == 'YES':
+            server_startup.create_config('encryption')
+        else:
+            server_startup.create_config()
+        server_startup.initialize_server()
+        if self.__extra_config_file is not None:
+            server_startup.add_myextra_configuration(self.__extra_config_file)
+        self.ps_nodes = server_startup.start_server(my_extra_options)
+
+    def shutdown_nodes(self, nodes=None):
+        if nodes is None:
+            nodes = self.pxc_nodes
+        for node in nodes:
+            node.shutdown()
+
+    def set_extra_conf_file(self, conf_file):
+        self.__extra_config_file = conf_file
+
+    def set_wsrep_provider_options(self, options):
+        self.__wsrep_provider_options = options
+
+    def set_number_of_nodes(self, number_of_nodes: int):
+        self.__number_of_nodes = number_of_nodes
+
+    def get_number_of_nodes(self):
+        return self.__number_of_nodes

diff --git a/config.ini b/config.ini
index 95b569e..32fc97f 100644
--- a/config.ini
+++ b/config.ini
@@ -4,9 +4,6 @@ basedir = /dev/shm/qa/PXC_tarball
 server=pxc
 node = 3
 user = root
-ps1_socket = /tmp/psnode1.sock
-ps2_socket = /tmp/psnode2.sock
-ps3_socket = /tmp/psnode3.sock
 pt_basedir = /dev/shm/qa/percona-toolkit-3.0.10
 pstress_bin = /dev/shm/qa/pstress/src/pstress-pxc
 pstress_grammar_file = /dev/shm/qa/pstress/src/grammar.sql

diff --git a/config.py b/config.py
index b888771..a5cda24 100644
--- a/config.py
+++ b/config.py
@@ -9,9 +9,6 @@
 SERVER = config['config']['server']
 NODE = config['config']['node']
 USER = config['config']['user']
-PS1_SOCKET = config['config']['ps1_socket']
-PS2_SOCKET = config['config']['ps2_socket']
-PS3_SOCKET = config['config']['ps3_socket']
 PT_BASEDIR = config['config']['pt_basedir']
 PSTRESS_BIN = config['config']['pstress_bin']
 PSTRESS_GRAMMAR_FILE = config['config']['pstress_grammar_file']
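Editor's note: every suite script in this patch now follows the shape that base_test.py establishes above: subclass BaseTest, pass per-suite mysqld options through the constructor, then drive start_pxc()/shutdown_nodes() from module level. As a rough, hypothetical illustration of that shape (the class name, option string and example database are mine, not from the patch; BaseTest, start_pxc, test_table_count and test_header are the patch's own API):

import os
import sys

cwd = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
sys.path.insert(0, parent_dir)
from base_test import *      # also brings in utility_cmd, db, debug, version
from util import utility


class ExampleQA(BaseTest):
    def __init__(self):
        # my_extra is forwarded to mysqld when the cluster is started
        super().__init__(my_extra="--innodb_buffer_pool_size=1G")


utility.test_header("Example QA scenario")
example_qa = ExampleQA()
example_qa.start_pxc()       # populates node1..node3 on success
example_qa.node1.execute("CREATE DATABASE IF NOT EXISTS example_db")  # hypothetical DB
utility_cmd.test_table_count(example_qa.node1, example_qa.node2, db)
example_qa.shutdown_nodes()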
Clients:") - elif SERVER == "ps": - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, - WORKDIR + '/psnode1/mysql.sock', USER, ENCRYPTION, MY_EXTRA) - - def stop_server(self, node): - if SERVER == "pxc": - for i in range(1, NODE + 1): - shutdown_node = BASEDIR + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(i) + '/mysql.sock shutdown > /dev/null 2>&1' - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node for crash recovery") - elif SERVER == "ps": - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, - WORKDIR + '/psnode1/mysql.sock', USER, ENCRYPTION, MY_EXTRA) +class PstressRun(BaseTest): + def __init__(self): + super().__init__() -print("-------------------------") -print("\nPXC pstress run ") -print("-------------------------") +utility.test_header("PXC pstress run") pstress_run = PstressRun() -if SERVER == "pxc": - pstress_run.start_server(NODE) -elif SERVER == "ps": - pstress_run.start_server(1) +if server == "pxc": + pstress_run.start_pxc() +elif server == "ps": + pstress_run.set_number_of_nodes(1) + pstress_run.start_ps() diff --git a/suite/correctness/chaosmonkey-test.py b/suite/correctness/chaosmonkey-test.py index 7cdf2b1..d278b61 100755 --- a/suite/correctness/chaosmonkey-test.py +++ b/suite/correctness/chaosmonkey-test.py @@ -1,129 +1,61 @@ #!/usr/bin/env python3 import os import sys -import argparse import random -import time + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection +from base_test import * from util import sysbench_run from util import utility -# Read argument -parser = argparse.ArgumentParser(prog='PXC chaosmonkey test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') - -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' # Initial configuration -node = '6' +number_of_nodes = 6 -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() +class ChaosMonkeyQA(BaseTest): + def __init__(self): + super().__init__(number_of_nodes) -class ChaosMonkeyQA: - def startup(self): - # Start PXC cluster for ChaosMonkey test - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - - def sysbench_run(self, socket, db): + def sysbench_run(self): # Sysbench dataload for consistency test - sysbench = 
diff --git a/suite/correctness/chaosmonkey-test.py b/suite/correctness/chaosmonkey-test.py
index 7cdf2b1..d278b61 100755
--- a/suite/correctness/chaosmonkey-test.py
+++ b/suite/correctness/chaosmonkey-test.py
@@ -1,129 +1,61 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
 import random
-import time
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
-from config import *
-from util import pxc_startup
-from util import db_connection
+from base_test import *
 from util import sysbench_run
 from util import utility
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC chaosmonkey test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
 
 # Initial configuration
-node = '6'
+number_of_nodes = 6
 
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 
+class ChaosMonkeyQA(BaseTest):
+    def __init__(self):
+        super().__init__(number_of_nodes)
 
-class ChaosMonkeyQA:
-    def startup(self):
-        # Start PXC cluster for ChaosMonkey test
-        dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-        server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(node), debug)
-        result = server_startup.sanity_check()
-        utility_cmd.check_testcase(result, "Startup sanity check")
-        if encryption == 'YES':
-            result = server_startup.create_config('encryption')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        else:
-            result = server_startup.create_config('none')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        result = server_startup.initialize_cluster()
-        utility_cmd.check_testcase(result, "Initializing cluster")
-        result = server_startup.start_cluster()
-        utility_cmd.check_testcase(result, "Cluster startup")
-        result = dbconnection_check.connection_check()
-        utility_cmd.check_testcase(result, "Database connection")
-
-    def sysbench_run(self, socket, db):
+    def sysbench_run(self):
         # Sysbench dataload for consistency test
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR,
-                                            socket, debug)
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
 
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load")
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db)
         if encryption == 'YES':
-            for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1):
-                encrypt_table = BASEDIR + '/bin/mysql --user=root ' \
-                                '--socket=' + socket + ' -e "' \
-                                ' alter table ' + db + '.sbtest' + str(i) + \
-                                " encryption='Y'" \
-                                '"; > /dev/null 2>&1'
-                if debug == 'YES':
-                    print(encrypt_table)
-                os.system(encrypt_table)
-        result = sysbench.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS,
-                                                   SYSBENCH_NORMAL_TABLE_SIZE, SYSBENCH_RUN_TIME, 'Yes')
-        utility_cmd.check_testcase(result, "Initiated sysbench oltp run")
+            sysbench.encrypt_sysbench_tables(db)
+        sysbench.test_sysbench_oltp_read_write(db, background=True)
 
     def multi_recovery_test(self):
         """ This method will kill 2 random nodes
            from 6 node cluster while sysbench is in
            progress and check data consistency after restart.
        """
-        nodes = [2, 3, 4, 5, 6]
-        rand_nodes = random.choices(nodes, k=2)
-        self.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test')
-        query = 'pidof sysbench'
-        sysbench_pid = os.popen(query).read().rstrip()
-        for j in rand_nodes:
-            query = 'cat `' + BASEDIR + \
-                    '/bin/mysql --user=root --socket=' + WORKDIR + \
-                    '/node' + str(j) + '/mysql.sock -Bse"select @@pid_file" 2>&1`'
-            time.sleep(1)
-            pid = os.popen(query).read().rstrip()
-            if debug == 'YES':
-                print("Terminating mysqld : " + 'kill -9 ' + pid)
-            result = os.system('kill -9 ' + pid)
-            utility_cmd.check_testcase(result, "Killed Cluster Node" + str(j) + " for ChaosMonkey QA")
-
-        kill_sysbench = "kill -9 " + sysbench_pid
+        nodes = self.pxc_nodes
+        nodes.remove(self.node1)
+        rand_nodes = random.sample(nodes, 2)
         if debug == 'YES':
-            print("Terminating sysbench run : " + kill_sysbench)
-        result = os.system(kill_sysbench)
-        utility_cmd.check_testcase(result, "Killed sysbench oltp run")
+            print("Random nodes selected:")
+            for n in rand_nodes:
+                print(str(n))
+        self.sysbench_run()
+        sysbench_pid = utility.sysbench_pid()
+        for j in rand_nodes:
+            utility_cmd.kill_cluster_node(j)
+        utility_cmd.kill_process(sysbench_pid, "sysbench oltp run")
         time.sleep(10)
         for j in rand_nodes:
-            query = 'bash ' + WORKDIR + \
-                    '/log/startup' + str(j) + '.sh'
-            if debug == 'YES':
-                print(query)
-            result = os.system(query)
-            utility_cmd.check_testcase(result, "Restarting Cluster Node" + str(j))
+            utility_cmd.restart_and_check_node(j)
         time.sleep(5)
 
 
-print("\nPXC ChaosMonkey Style test")
-print("----------------------------")
+utility.test_header("PXC ChaosMonkey Style test")
 chaosmonkey_qa = ChaosMonkeyQA()
-chaosmonkey_qa.startup()
+chaosmonkey_qa.start_pxc()
 chaosmonkey_qa.multi_recovery_test()
-version = utility_cmd.version_check(BASEDIR)
 time.sleep(10)
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
+utility_cmd.test_table_count(chaosmonkey_qa.node1, chaosmonkey_qa.node2, db)
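Editor's note: the switch from random.choices to random.sample in multi_recovery_test above is behaviorally significant. choices samples with replacement and can return the same node twice, so the "kill 2 nodes" test could end up killing only one; sample guarantees two distinct victims. A standalone stdlib illustration (node names are placeholders):

import random

nodes = ['node2', 'node3', 'node4', 'node5', 'node6']

# sample(): k picks without replacement - always two different victims
victims = random.sample(nodes, 2)
assert victims[0] != victims[1]

# choices(): k picks WITH replacement - both picks may be the same node
maybe_duplicates = random.choices(nodes, k=2)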
diff --git a/suite/correctness/cluster_interaction.py b/suite/correctness/cluster_interaction.py
index 174fb8e..1b1ada0 100755
--- a/suite/correctness/cluster_interaction.py
+++ b/suite/correctness/cluster_interaction.py
@@ -1,120 +1,38 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
-import time
-import subprocess
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
 from config import *
-from util import pxc_startup
-from util import db_connection
 from util import sysbench_run
 from util import utility
+from base_test import *
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC cluster interaction test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
-
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
-version = utility_cmd.version_check(BASEDIR)
 
+def run_query(query):
+    query_status = os.system(query)
+    if int(query_status) != 0:
+        print("ERROR! Query execution failed: " + query)
+        return 1
+    return 0
 
-class ClusterInteraction:
-    def __init__(self, basedir, workdir, user, node1_socket, pt_basedir, node):
-        self.workdir = workdir
-        self.basedir = basedir
-        self.user = user
-        self.socket = node1_socket
-        self.pt_basedir = pt_basedir
-        self.node = node
 
+class ClusterInteraction(BaseTest):
+    def __init__(self):
+        super().__init__()
 
-    def run_query(self, query):
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR! Query execution failed: " + query)
-            return 1
-        return 0
-
-    def start_pxc(self):
-        # Start PXC cluster for replication test
-        dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-        server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(self.node), debug)
-        result = server_startup.sanity_check()
-        utility_cmd.check_testcase(result, "Startup sanity check")
-        if encryption == 'YES':
-            result = server_startup.create_config(
-                'encryption', 'gcache.keep_pages_size=5;gcache.page_size=1024M;gcache.size=1024M;')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        else:
-            result = server_startup.create_config(
-                'none', 'gcache.keep_pages_size=5;gcache.page_size=1024M;gcache.size=1024M;')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        result = server_startup.initialize_cluster()
-        utility_cmd.check_testcase(result, "Initializing cluster")
-        result = server_startup.start_cluster()
-        utility_cmd.check_testcase(result, "Cluster startup")
-        result = dbconnection_check.connection_check()
-        utility_cmd.check_testcase(result, "Database connection")
-
-    def sysbench_run(self, socket, db, background_run=None):
+    def sysbench_run(self, db, background_run=False):
         # Sysbench dataload for cluster interaction test
-        if background_run is not None:
-            background_run = 'Yes'
-        else:
-            background_run = 'No'
-
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR,
-                                            socket, debug)
-
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load")
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
         if encryption == 'YES':
-            for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1):
-                encrypt_table = BASEDIR + '/bin/mysql --user=root ' \
-                                '--socket=' + socket + ' -e "' \
-                                ' alter table ' + db + '.sbtest' + str(i) + \
-                                " encryption='Y'" \
-                                '"; > /dev/null 2>&1'
-                if debug == 'YES':
-                    print(encrypt_table)
-                os.system(encrypt_table)
-
-        if background_run == "Yes":
-            result = sysbench.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_TABLE_COUNT,
+            sysbench.encrypt_sysbench_tables(db)
+        if background_run:
+            sysbench.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_TABLE_COUNT,
                                                    SYSBENCH_NORMAL_TABLE_SIZE, SYSBENCH_RUN_TIME, background_run)
-            utility_cmd.check_testcase(result, "Initiated sysbench oltp run")
-
-    def startup_check(self, cluster_node):
-        """ This method will check the node
-            startup status.
-        """
-        ping_query = self.basedir + '/bin/mysqladmin --user=root --socket=' + \
-                     WORKDIR + '/node' + cluster_node + '/mysql.sock ping > /dev/null 2>&1'
-        for startup_timer in range(120):
-            time.sleep(1)
-            ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL)
-            ping_status = ("{}".format(ping_check))
-            if int(ping_status) == 0:
-                utility_cmd.check_testcase(int(ping_status), "Cluster restart is successful")
-                break  # break the loop if mysqld is running
 
     def cluster_interaction_qa(self):
         """ This method will help us to test cluster
@@ -123,76 +41,47 @@ def cluster_interaction_qa(self):
            2) IST
            3) Node joining
        """
-        self.sysbench_run(self.socket, 'test', 'background_run')
-        query = 'pidof sysbench'
-        sysbench_pid = os.popen(query).read().rstrip()
+        self.sysbench_run(db, True)
+        sysbench_pid = utility.sysbench_pid()
         if int(version) > int("050700"):
             utility_cmd.check_testcase(0, "Initiating flow control test")
-            for j in range(1, int(self.node) + 1):
-                query = self.basedir + "/bin/mysql --user=root --socket=" + \
-                        self.socket + ' -e"set global pxc_strict_mode=DISABLED;' \
-                        '" > /dev/null 2>&1'
-                if debug == 'YES':
-                    print(query)
-                self.run_query(query)
-            query = self.basedir + \
-                '/bin/mysql ' \
-                ' --user=root --socket=' + WORKDIR + '/node1/mysql.sock test' \
-                ' -Bse"flush table sbtest1 with read lock;' \
-                'select sleep(120);unlock tables" 2>&1 &'
-            if debug == 'YES':
-                print(query)
-            os.system(query)
-            flow_control_status = 'OFF'
-            while flow_control_status != 'OFF':
-                query = self.basedir + \
-                    '/bin/mysql --user=root --socket=' + WORKDIR + '/node1/mysql.sock' \
-                    ' -Bse"show status like ' \
-                    "'wsrep_flow_control_status';" + '"' \
-                    "| awk '{ print $2 }' 2>/dev/null"
-                flow_control_status = os.popen(query).read().rstrip()
-                time.sleep(1)
+            self.node1.execute("set global pxc_strict_mode=DISABLED")
+            queries = ["flush table " + db + ".sbtest1 with read lock",
+                       "select sleep(120)",
+                       "unlock tables"]
+            self.node1.execute_queries(queries)
+            flow_control_status = 'ON'
+            count = 1
+            while flow_control_status != 'OFF':
+                if count > 30:
+                    print("flow control status is not OFF, it is " + flow_control_status)
+                    break
+                flow_control_status = self.node1.execute_get_row("show status like 'wsrep_flow_control_status'")[1]
+                time.sleep(1)
+                count = count + 1
         utility_cmd.check_testcase(0, "Initiating IST test")
-        shutdown_node = self.basedir + '/bin/mysqladmin --user=root --socket=' + \
-                        WORKDIR + '/node' + self.node + '/mysql.sock shutdown > /dev/null 2>&1'
-        if debug == 'YES':
-            print(shutdown_node)
-        result = os.system(shutdown_node)
-        utility_cmd.check_testcase(result, "Shutdown cluster node IST test")
-        time.sleep(15)
-        kill_sysbench = "kill -9 " + sysbench_pid + " > /dev/null 2>&1"
-        if debug == 'YES':
-            print("Terminating sysbench run : " + kill_sysbench)
-        os.system(kill_sysbench)
-        ist_startup = "bash " + self.workdir + \
-                      '/log/startup' + str(self.node) + '.sh'
-        if debug == 'YES':
-            print(ist_startup)
-        os.system(ist_startup)
-        self.startup_check(self.node)
-        kill_sysbench = "kill -9 " + sysbench_pid + " > /dev/null 2>&1"
         if debug == 'YES':
-            print("Terminating sysbench run : " + kill_sysbench)
-        os.system(kill_sysbench)
+            print("shutdown node3")
+        self.node3.shutdown()
+        time.sleep(30)
+        utility_cmd.kill_process(sysbench_pid, "sysbench run")
+        utility_cmd.restart_cluster_node(self.node3)
+        utility_cmd.startup_check(self.node3)
+        utility_cmd.kill_process(sysbench_pid, "sysbench run", True)
         utility_cmd.check_testcase(0, "Initiating Node joining test")
-        self.sysbench_run(self.socket, 'test_one')
-        self.sysbench_run(self.socket, 'test_two')
-        self.sysbench_run(self.socket, 'test_three')
-        utility_cmd.node_joiner(self.workdir, self.basedir, str(3), str(4))
+        self.sysbench_run('test_one')
+        self.sysbench_run('test_two')
+        self.sysbench_run('test_three')
+        pxc_startup.StartCluster.join_new_node(self.node3, 4, debug=debug)
 
 
-cluster_interaction = ClusterInteraction(BASEDIR, WORKDIR, USER,
-                                         WORKDIR + '/node1/mysql.sock', PT_BASEDIR, NODE)
-print('----------------------------------------------')
-print('Cluster interaction QA using flow control test')
-print('----------------------------------------------')
+cluster_interaction = ClusterInteraction()
+utility.test_header('Cluster interaction QA using flow control test')
 cluster_interaction.start_pxc()
 cluster_interaction.cluster_interaction_qa()
-version = utility_cmd.version_check(BASEDIR)
 time.sleep(5)
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
+utility_cmd.test_table_count(cluster_interaction.node1, cluster_interaction.node2, db)
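Editor's note: the flow-control section above polls a wsrep status variable with a bounded retry count. That bounded-poll shape is reusable for any SHOW STATUS wait; a sketch against the DbConnection API this patch introduces (the helper name is hypothetical, and note that the variable name must be quoted inside the LIKE clause):

import time

def wait_for_status(node, variable, expected, max_tries=30):
    # node is a DbConnection; execute_get_row returns one (Variable_name, Value) row
    for _ in range(max_tries):
        value = node.execute_get_row("show status like '" + variable + "'")[1]
        if value == expected:
            return True
        time.sleep(1)
    return False

# e.g. wait_for_status(node1, 'wsrep_flow_control_status', 'OFF')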
diff --git a/suite/correctness/consistency_check.py b/suite/correctness/consistency_check.py
index 9fb52d9..3f4d037 100755
--- a/suite/correctness/consistency_check.py
+++ b/suite/correctness/consistency_check.py
@@ -1,130 +1,50 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
-import time
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
-from util import pxc_startup
-from util import db_connection
 from util import sysbench_run
 from util import utility
-from util import createsql
+from util import executesql
 from util import rqg_datagen
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC consistency test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
-
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 sysbench_run_time = 10
 
 
-class ConsistencyCheck:
-    def __init__(self, basedir, workdir, user, node1_socket, pt_basedir, node):
-        self.workdir = workdir
-        self.basedir = basedir
-        self.user = user
-        self.socket = node1_socket
-        self.pt_basedir = pt_basedir
-        self.node = node
-
-    def run_query(self, query):
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR! Query execution failed: " + query)
-            return 1
-        return 0
+class ConsistencyCheck(BaseTest):
+    def __init__(self):
+        super().__init__()
 
-    def start_pxc(self):
-        # Start PXC cluster for replication test
-        dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-        server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug)
-        result = server_startup.sanity_check()
-        utility_cmd.check_testcase(result, "Startup sanity check")
-        if encryption == 'YES':
-            result = server_startup.create_config('encryption')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        else:
-            result = server_startup.create_config('none')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        result = server_startup.initialize_cluster()
-        utility_cmd.check_testcase(result, "Initializing cluster")
-        result = server_startup.start_cluster()
-        utility_cmd.check_testcase(result, "Cluster startup")
-        result = dbconnection_check.connection_check()
-        utility_cmd.check_testcase(result, "Database connection")
-
-    def sysbench_run(self, socket, db):
+    def sysbench_run(self):
         # Sysbench dataload for consistency test
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR,
-                                            socket, debug)
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
 
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load")
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
         if encryption == 'YES':
-            for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1):
-                encrypt_table = BASEDIR + '/bin/mysql --user=root ' \
-                                '--socket=' + socket + ' -e "' \
-                                ' alter table ' + db + '.sbtest' + str(i) + \
-                                " encryption='Y'" \
-                                '"; > /dev/null 2>&1'
-                if debug == 'YES':
-                    print(encrypt_table)
-                os.system(encrypt_table)
+            sysbench.encrypt_sysbench_tables(db)
 
-    def data_load(self, db, socket):
+    def data_load(self, load_db):
         # Random dataload for consistency test
-        if os.path.isfile(parent_dir + '/util/createsql.py'):
-            generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000)
-            generate_sql.OutFile()
-            generate_sql.CreateTable()
-            sys.stdout = sys.__stdout__
-        create_db = self.basedir + "/bin/mysql --user=root --socket=" + \
-            socket + ' -Bse"drop database if exists ' + db + \
-            ';create database ' + db + ';" 2>&1'
-        if debug == 'YES':
-            print(create_db)
-        result = os.system(create_db)
-        utility_cmd.check_testcase(result, "Sample DB creation")
-        data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \
-            socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1'
-        if debug == 'YES':
-            print(data_load_query)
-        result = os.system(data_load_query)
-        utility_cmd.check_testcase(result, "Sample data load")
+        self.node1.execute("CREATE DATABASE " + load_db)
+        execute_sql = executesql.GenerateSQL(self.node1, load_db, 1000)
+        execute_sql.create_table()
 
 
+load_db = 'pxc_dataload_db'
 
-print("\nPXC data consistency test between nodes")
-print("----------------------------------------")
-consistency_run = ConsistencyCheck(BASEDIR, WORKDIR, USER, WORKDIR + '/node1/mysql.sock', PT_BASEDIR, NODE)
-rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug)
+utility.test_header("PXC data consistency test between nodes")
+consistency_run = ConsistencyCheck()
 consistency_run.start_pxc()
-consistency_run.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test')
-consistency_run.data_load('pxc_dataload_db', WORKDIR + '/node1/mysql.sock')
-rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock')
-version = utility_cmd.version_check(BASEDIR)
+rqg_dataload = rqg_datagen.RQGDataGen(consistency_run.node1, debug)
+consistency_run.sysbench_run()
+consistency_run.data_load(load_db)
+rqg_dataload.pxc_dataload(workdir)
 time.sleep(5)
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
-result = utility_cmd.check_table_count(BASEDIR, 'pxc_dataload_db', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: pxc_dataload_db")
+utility_cmd.test_table_count(consistency_run.node1, consistency_run.node2, db)
+utility_cmd.test_table_count(consistency_run.node1, consistency_run.node2, load_db)
diff --git a/suite/correctness/crash_recovery.py b/suite/correctness/crash_recovery.py
index 082973e..a2e5bfe 100755
--- a/suite/correctness/crash_recovery.py
+++ b/suite/correctness/crash_recovery.py
@@ -1,122 +1,40 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
-import time
-import subprocess
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
-from util import pxc_startup
-from util import db_connection
 from util import sysbench_run
 from util import utility
 from util import table_checksum
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC crash recovery test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
-
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 
+def restart_node_check_recovery_status(cluster_node: DbConnection):
+    """ This method will check the node recovery
+        startup status.
+    """
+    utility_cmd.restart_cluster_node(cluster_node)
+    utility_cmd.startup_check(cluster_node)
+    utility_cmd.wait_for_wsrep_status(cluster_node)
 
-class CrashRecovery:
-    def __init__(self, basedir, workdir, user, node1_socket, pt_basedir, node):
-        self.workdir = workdir
-        self.basedir = basedir
-        self.user = user
-        self.socket = node1_socket
-        self.pt_basedir = pt_basedir
-        self.node = node
 
-    def run_query(self, query):
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR! Query execution failed: " + query)
-            return 1
-        return 0
-
-    def start_pxc(self):
-        # Start PXC cluster for replication test
-        dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-        server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(self.node), debug)
-        result = server_startup.sanity_check()
-        utility_cmd.check_testcase(result, "Startup sanity check")
-        if encryption == 'YES':
-            result = server_startup.create_config('encryption')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        else:
-            result = server_startup.create_config('none')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-        result = server_startup.initialize_cluster()
-        utility_cmd.check_testcase(result, "Initializing cluster")
-        result = server_startup.start_cluster()
-        utility_cmd.check_testcase(result, "Cluster startup")
-        result = dbconnection_check.connection_check()
-        utility_cmd.check_testcase(result, "Database connection")
+class CrashRecovery(BaseTest):
+    def __init__(self):
+        super().__init__()
 
-    def sysbench_run(self, socket, db):
+    def sysbench_run(self):
         # Sysbench dataload for consistency test
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR,
-                                            WORKDIR + '/node1/mysql.sock', debug)
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
 
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load")
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE)
         if encryption == 'YES':
-            for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1):
-                encrypt_table = BASEDIR + '/bin/mysql --user=root ' \
-                                '--socket=' + socket + ' -e "' \
-                                ' alter table ' + db + '.sbtest' + str(i) + \
-                                " encryption='Y'" \
-                                '"; > /dev/null 2>&1'
-                if debug == 'YES':
-                    print(encrypt_table)
-                os.system(encrypt_table)
-        result = sysbench.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS,
-                                                   SYSBENCH_NORMAL_TABLE_SIZE, SYSBENCH_RUN_TIME, 'Yes')
-        utility_cmd.check_testcase(result, "Initiated sysbench oltp run")
-
-    def startup_check(self, cluster_node):
-        """ This method will check the node recovery
-            startup status.
-        """
-        recovery_startup = "bash " + self.workdir + \
-                           '/log/startup' + str(cluster_node) + '.sh'
-        if debug == 'YES':
-            print(recovery_startup)
-        os.system(recovery_startup)
-        ping_query = self.basedir + '/bin/mysqladmin --user=root --socket=' + \
-                     WORKDIR + '/node' + cluster_node + '/mysql.sock ping > /dev/null 2>&1'
-        for startup_timer in range(120):
-            time.sleep(1)
-            ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL)
-            ping_status = ("{}".format(ping_check))
-            if int(ping_status) == 0:
-                wsrep_status = ""
-                while wsrep_status != "Synced":
-                    status_query = self.basedir + '/bin/mysql --user=root --socket=' + \
-                                   WORKDIR + '/node' + cluster_node + \
-                                   '/mysql.sock -Bse"show status like ' \
-                                   "'wsrep_local_state_comment'\" 2>&1 | awk '{print $2}'"
-                    wsrep_status = os.popen(status_query).read().rstrip()
-                utility_cmd.check_testcase(int(ping_status), "Cluster recovery is successful")
-                break  # break the loop if mysqld is running
+            sysbench.encrypt_sysbench_tables(db)
+        sysbench.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS,
+                                               SYSBENCH_NORMAL_TABLE_SIZE, SYSBENCH_RUN_TIME, True)
 
     def crash_recovery(self, test_name):
         """ This method will help us to test crash
@@ -127,96 +45,56 @@ def crash_recovery(self, test_name):
            3) Abnormal restart (multiple restart)
               while active data load in primary node
        """
-        self.sysbench_run(self.socket, 'test')
-        query = 'pidof sysbench'
-        sysbench_pid = os.popen(query).read().rstrip()
-        pid_list = []
+        self.sysbench_run()
+        sysbench_pid = utility.sysbench_pid()
         if test_name == "with_force_kill":
-            for j in range(1, int(self.node) + 1):
-                query = 'cat `' + self.basedir + '/bin/mysql ' \
-                        ' --user=root --socket=' + WORKDIR + \
-                        '/node' + str(j) + '/mysql.sock -Bse"select @@pid_file" 2>&1`'
-                pid_list += [os.popen(query).read().rstrip()]
             time.sleep(10)
-            kill_mysqld = "kill -9 " + pid_list[j - 1]
-            if debug == 'YES':
-                print("Terminating mysqld : " + kill_mysqld)
-            result = os.system(kill_mysqld)
-            utility_cmd.check_testcase(result, "Killed cluster node for crash recovery")
+            utility_cmd.kill_cluster_node(self.node3)
             time.sleep(5)
-            kill_sysbench = "kill -9 " + sysbench_pid
-            if debug == 'YES':
-                print("Terminating sysbench run : " + kill_sysbench)
-            os.system(kill_sysbench)
-            self.startup_check(self.node)
+            utility_cmd.kill_process(sysbench_pid, "sysbench")
+            restart_node_check_recovery_status(self.node3)
         elif test_name == "single_restart":
-            shutdown_node = self.basedir + '/bin/mysqladmin --user=root --socket=' + \
-                            WORKDIR + '/node' + self.node + '/mysql.sock shutdown > /dev/null 2>&1'
+            time.sleep(10)
+            result = self.node3.shutdown()
+            time.sleep(60)
             if debug == 'YES':
-                print(shutdown_node)
-            result = os.system(shutdown_node)
+                print("Shutdown node " + str(self.node3.get_node_number()))
             utility_cmd.check_testcase(result, "Shutdown cluster node for crash recovery")
             time.sleep(5)
-            kill_sysbench = "kill -9 " + sysbench_pid
-            if debug == 'YES':
-                print("Terminating sysbench run : " + kill_sysbench)
-            os.system(kill_sysbench)
-            self.startup_check(self.node)
+            utility_cmd.kill_process(sysbench_pid, "sysbench")
+            restart_node_check_recovery_status(self.node3)
         elif test_name == "multi_restart":
             for j in range(1, 3):
-                shutdown_node = self.basedir + '/bin/mysqladmin --user=root --socket=' + \
-                                WORKDIR + '/node' + self.node + '/mysql.sock shutdown > /dev/null 2>&1'
+                result = self.node3.shutdown()
+                time.sleep(60)
                 if debug == 'YES':
-                    print(shutdown_node)
-                result = os.system(shutdown_node)
+                    print('Shutdown node' + str(self.node3.get_node_number()))
                 utility_cmd.check_testcase(result, "Shutdown cluster node for crash recovery")
-                time.sleep(5)
-                self.startup_check(self.node)
-                query = 'pidof sysbench'
-                sysbench_pid = os.popen(query).read().rstrip()
-                time.sleep(5)
+                time.sleep(10)
+                restart_node_check_recovery_status(self.node3)
+                sysbench_pid = utility.sysbench_pid()
                 if not sysbench_pid:
-                    self.sysbench_run(self.socket, 'test')
-                    query = 'pidof sysbench'
-                    sysbench_pid = os.popen(query).read().rstrip()
+                    self.sysbench_run()
+                    sysbench_pid = utility.sysbench_pid()
                 time.sleep(5)
-                kill_sysbench = "kill -9 " + sysbench_pid
-                if debug == 'YES':
-                    print("Terminating sysbench run : " + kill_sysbench)
-                os.system(kill_sysbench)
-        else:
-            kill_sysbench = "kill -9 " + sysbench_pid
-            if debug == 'YES':
-                print("Terminating sysbench run : " + kill_sysbench)
-            os.system(kill_sysbench)
+            utility_cmd.kill_process(sysbench_pid, "sysbench", True)
 
 
-crash_recovery_run = CrashRecovery(BASEDIR, WORKDIR, USER, WORKDIR + '/node1/mysql.sock', PT_BASEDIR, NODE)
-checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, WORKDIR + '/node1/mysql.sock', debug)
-version = utility_cmd.version_check(BASEDIR)
-print('---------------------------------------------------')
-print('Crash recovery QA using forceful mysqld termination')
-print('---------------------------------------------------')
+crash_recovery_run = CrashRecovery()
+utility.test_header("Crash recovery QA using forceful mysqld termination")
 crash_recovery_run.start_pxc()
+checksum = table_checksum.TableChecksum(crash_recovery_run.node1, workdir, pt_basedir, debug)
 crash_recovery_run.crash_recovery('with_force_kill')
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
-print('-------------------------------')
-print('Crash recovery QA using single restart')
-print('-------------------------------')
+utility_cmd.test_table_count(crash_recovery_run.node1, crash_recovery_run.node2, db)
+
+utility.test_header('Crash recovery QA using single restart')
 crash_recovery_run.start_pxc()
 crash_recovery_run.crash_recovery('single_restart')
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
-print('----------------------------------------')
-print('Crash recovery QA using multiple restart')
-print('----------------------------------------')
+utility_cmd.test_table_count(crash_recovery_run.node1, crash_recovery_run.node2, db)
+
+utility.test_header('Crash recovery QA using multiple restart')
 crash_recovery_run.start_pxc()
 crash_recovery_run.crash_recovery('multi_restart')
 time.sleep(10)
-result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock',
-                                       WORKDIR + '/node2/mysql.sock')
-utility_cmd.check_testcase(result, "Checksum run for DB: test")
+utility_cmd.test_table_count(crash_recovery_run.node1, crash_recovery_run.node2, db)
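Editor's note: the deleted startup_check above documents what node recovery actually waits for: mysqladmin ping until the server answers, then wsrep_local_state_comment until the node reports Synced. The refactor moves this into the utility helpers (restart_cluster_node / startup_check / wait_for_wsrep_status). Reduced to its shell-free core with the patch's DbConnection API, the wait is roughly (helper name hypothetical):

import time

def wait_until_synced(node, timeout=120):
    # Poll until Galera reports the restarted node as fully synced
    for _ in range(timeout):
        row = node.execute_get_row("show status like 'wsrep_local_state_comment'")
        if row is not None and row[1] == 'Synced':
            return True
        time.sleep(1)
    return False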
diff --git a/suite/galera_sr/galera_basic_sr_qa.py b/suite/galera_sr/galera_basic_sr_qa.py
old mode 100644
new mode 100755
index e59f3ba..3bff2c8
--- a/suite/galera_sr/galera_basic_sr_qa.py
+++ b/suite/galera_sr/galera_basic_sr_qa.py
@@ -1,94 +1,61 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
 import itertools
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
 from util import sysbench_run
 from util import utility
 from util import table_checksum
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC streaming replication test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
-
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 
-class StreamingReplication:
-    def start_server(self, node):
-        my_extra = "--innodb_buffer_pool_size=2G --innodb_log_file_size=1G"
-        utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node,
-                              WORKDIR + '/node1/mysql.sock', USER, encryption, my_extra)
+class StreamingReplication(BaseTest):
+    def __init__(self):
+        super().__init__(my_extra="--innodb_buffer_pool_size=2G --innodb_log_file_size=1G")
 
-    def sysbench_run(self, socket, db):
+    def sysbench_run(self):
         # Sysbench data load
-        version = utility_cmd.version_check(BASEDIR)
-        checksum = ""
         if int(version) < int("080000"):
-            checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug)
-            checksum.sanity_check()
+            checksum = table_checksum.TableChecksum(self.node1, workdir, pt_basedir, debug)
+            checksum.sanity_check(self.pxc_nodes)
 
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_LOAD_TEST_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load (threads : " + str(SYSBENCH_THREADS) + ")")
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_LOAD_TEST_TABLE_SIZE)
 
-    def streaming_replication_qa(self, socket, db):
+    def streaming_replication_qa(self):
         # Streaming Replication QA
         # Create data insert procedure
-        create_procedure = BASEDIR + "/bin/mysql --user=root --socket=" + socket + \
-                           ' ' + db + ' -Bse"source ' + cwd + '/sr_procedure.sql " 2>&1'
         if debug == 'YES':
-            print(create_procedure)
-        result = os.system(create_procedure)
-        utility_cmd.check_testcase(result, "Creating streaming replication data insert procedure")
+            print("Creating streaming replication data insert procedure from " + cwd + '/sr_procedure.sql')
+        self.node1.execute("DROP PROCEDURE IF EXISTS test.sr_procedure")
+        self.node1.execute_query_from_file(cwd + '/sr_procedure.sql')
+
         wsrep_trx_fragment_unit = ['bytes', 'rows', 'statements']
-        wsrep_trx_fragment_size = [1, 2, 4, 8, 16, 64, 128, 256, 512, 1024]
-        row_count = [100, 1000, 10000, 100000]
+        wsrep_trx_fragment_size = [128]  # 1, 2, 4, 8, 16, 64, 128, 256, 512, 1024
+        row_count = [100000]  # 100, 1000, 10000
         for trx_fragment_unit, trx_fragment_size, rows in \
                 itertools.product(wsrep_trx_fragment_unit, wsrep_trx_fragment_size, row_count):
-            sr_procedure = BASEDIR + "/bin/mysql --user=root --socket=" + socket + \
-                           ' -Bse"call ' + db + '.sr_procedure(' + str(rows) + \
-                           ",'" + trx_fragment_unit + "'," + \
-                           str(trx_fragment_size) + ')" 2>&1'
             if debug == 'YES':
-                print(sr_procedure)
-            result = os.system(sr_procedure)
-            sr_combination = "DML row count " + str(rows) + ", fragment_unit : " + \
-                             trx_fragment_unit + ", fragment_size : " + \
-                             str(trx_fragment_size)
-            utility_cmd.check_testcase(result, "SR testcase( " + sr_combination + " )")
+                print("call " + db + ".sr_procedure")
+            proc_args = [str(rows), trx_fragment_unit, str(trx_fragment_size)]
+            self.node1.call_proc(db + '.sr_procedure', proc_args)
+
+            sr_combination = "DML row count " + proc_args[0] + ", fragment_unit : " + \
+                             proc_args[1] + ", fragment_size : " + proc_args[2]
+            utility_cmd.check_testcase(0, "SR testcase( " + sr_combination + " )")
             if trx_fragment_unit == 'bytes':
-                delete_rows = BASEDIR + "/bin/mysql --user=root --socket=" + socket + \
-                              ' ' + db + ' -Bse"delete from sbtest1 limit ' + str(rows) + ';" 2>&1'
-                if debug == 'YES':
-                    print(delete_rows)
-                os.system(delete_rows)
+                delete_rows = "delete from " + db + ".sbtest1 limit " + str(rows)
+                self.node1.execute(delete_rows)
 
 
-print("--------------------------------")
-print("\nPXC Streaming Replication test")
-print("--------------------------------")
+utility.test_header("PXC Streaming Replication test")
 streaming_replication = StreamingReplication()
-streaming_replication.start_server(NODE)
-streaming_replication.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test')
-streaming_replication.streaming_replication_qa(WORKDIR + '/node1/mysql.sock', 'test')
-utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE)
+streaming_replication.start_pxc()
+streaming_replication.sysbench_run()
+streaming_replication.streaming_replication_qa()
+streaming_replication.shutdown_nodes()

diff --git a/suite/galera_sr/sr_procedure.sql b/suite/galera_sr/sr_procedure.sql
index 300e582..b40de0b 100644
--- a/suite/galera_sr/sr_procedure.sql
+++ b/suite/galera_sr/sr_procedure.sql
@@ -1,6 +1,4 @@
-DROP PROCEDURE IF EXISTS sr_procedure;
-DELIMITER //
-CREATE PROCEDURE sr_procedure (IN row_count int(10), IN fragment_unit varchar(50), IN fragment_size int(10))
+CREATE PROCEDURE test.sr_procedure (IN row_count int(10), IN fragment_unit varchar(50), IN fragment_size int(10))
 BEGIN
 DECLARE trx_statementsVar INT ;
 DECLARE trx_fragmentVar INT ;
@@ -28,5 +26,4 @@ BEGIN
 END IF;
 END IF;
 COMMIT;
-END //
-DELIMITER ;
+END
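Editor's note on why the DELIMITER lines can go: DELIMITER is a mysql command-line client directive, not SQL. The old flow piped sr_procedure.sql through the mysql binary (via "source"), which needs // so the client does not split the procedure body at every semicolon. The new flow sends the file's contents as a single statement through the connector (execute_query_from_file), where a DELIMITER line would be a syntax error, and the DROP now happens as a separate execute() call. My reading of the intended usage, mirroring galera_basic_sr_qa.py above:

# sr_procedure.sql now holds exactly ONE statement: the CREATE PROCEDURE
node1.execute("DROP PROCEDURE IF EXISTS test.sr_procedure")
node1.execute_query_from_file(cwd + '/sr_procedure.sql')
node1.call_proc('test.sr_procedure', ['100000', 'bytes', '128'])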
diff --git a/suite/galera_sr/thread_pool_qa.py b/suite/galera_sr/thread_pool_qa.py
old mode 100644
new mode 100755
index a8cc715..879b004
--- a/suite/galera_sr/thread_pool_qa.py
+++ b/suite/galera_sr/thread_pool_qa.py
@@ -1,110 +1,59 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
 import itertools
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
 from util import sysbench_run
 from util import utility
 from util import table_checksum
-from util import db_connection
 from util import pxc_startup
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC thread pool test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
-
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 
+class ThreadPooling(BaseTest):
+    def __init__(self):
+        super().__init__(my_extra="--max-connections=1500 --innodb_buffer_pool_size=2G --innodb_log_file_size=1G")
 
-class ThreadPooling:
-    def start_server(self, node):
-        my_extra = "--innodb_buffer_pool_size=2G --innodb_log_file_size=1G"
-        utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node,
-                              WORKDIR + '/node1/mysql.sock', USER, encryption, my_extra)
-
-    def sysbench_run(self, socket, db, port):
+    def sysbench_run(self, port):
         # Sysbench data load
-        version = utility_cmd.version_check(BASEDIR)
         checksum = ""
         if int(version) < int("080000"):
-            checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug)
-            checksum.sanity_check()
+            checksum = table_checksum.TableChecksum(self.node1, workdir, pt_basedir, debug)
+            checksum.sanity_check(self.pxc_nodes)
 
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, 50, 50, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load (threads : " + str(SYSBENCH_THREADS) + ")")
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, 50, 50, SYSBENCH_NORMAL_TABLE_SIZE)
         # Sysbench OLTP read write run
-        query = "sysbench /usr/share/sysbench/oltp_read_write.lua" \
-                " --table-size=" + str(SYSBENCH_NORMAL_TABLE_SIZE) + \
-                " --tables=" + str(50) + \
-                " --threads=" + str(50) + \
-                " --mysql-db=test --mysql-user=" + SYSBENCH_USER + \
-                " --mysql-password=" + SYSBENCH_PASS + \
-                " --db-driver=mysql --mysql-host=127.0.0.1 --mysql-port=" + str(port) + \
-                " --time=300 --db-ps-mode=disable run > " + WORKDIR + "/log/sysbench_read_write.log"
-        if debug == 'YES':
-            print(query)
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR!: sysbench read write run is failed")
-            utility_cmd.check_testcase(result, "Sysbench read write run")
-        utility_cmd.check_testcase(0, "Sysbench read write run")
+        sysbench.test_sysbench_oltp_read_write(db, 50, 50, SYSBENCH_NORMAL_TABLE_SIZE,
+                                               300, port=port)
 
-    def thread_pooling_qa(self, socket, db):
+    def thread_pooling_qa(self):
         # Thread Pooling QA
         thread_handling_option = ['pool-of-threads', 'one-thread-per-connection']
-        thread_pool_size = [2, 4, 8]
-        thread_pool_max_threads = [2, 4, 8]
+        thread_pool_size = [2]  # 4, 8
+        thread_pool_max_threads = [2, 8]  # 4
         for tp_option, tp_size, tp_max_thread in \
                 itertools.product(thread_handling_option, thread_pool_size, thread_pool_max_threads):
             my_extra = "--thread_handling=" + tp_option + " --thread_pool_size=" + str(tp_size) + \
                        " --thread_pool_max_threads=" + str(tp_max_thread)
             # Start PXC cluster for encryption test
             utility_cmd.check_testcase(0, "Thread pooling options : " + my_extra)
-            dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-            server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug)
-            result = server_startup.sanity_check()
-            utility_cmd.check_testcase(result, "Startup sanity check")
-            result = server_startup.create_config('none')
-            utility_cmd.check_testcase(result, "Configuration file creation")
-            result = server_startup.initialize_cluster()
-            utility_cmd.check_testcase(result, "Initializing cluster")
-            for i in range(1, int(NODE) + 1):
-                n_name = open(WORKDIR + '/conf/node' + str(i) + '.cnf', 'a+')
-                n_name.write('admin_address=127.0.0.1\n')
-                n_name.write('admin_port=' + str(33062 + i) + '\n')
-                n_name.close()
-
-            result = server_startup.start_cluster(my_extra)
-            utility_cmd.check_testcase(result, "Cluster startup")
-            result = dbconnection_check.connection_check()
-            utility_cmd.check_testcase(result, "Database connection")
-            self.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test', 33063)
-            utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE)
+            server_startup = pxc_startup.StartCluster(3, debug)
+            server_startup.sanity_check()
+            server_startup.create_config('none', set_admin_address=True)
+            server_startup.initialize_cluster()
+            self.pxc_nodes = server_startup.start_cluster(my_extra)
+            self.node1 = self.pxc_nodes[0]
+            utility_cmd.check_testcase(self.node1.connection_check(), "Database connection")
+            self.sysbench_run(33063)
+            self.shutdown_nodes(self.pxc_nodes)
 
 
-print("--------------------------------")
-print("\nPXC Thread Pooling test")
-print("--------------------------------")
+utility.test_header("PXC Thread Pooling test")
 thread_pooling = ThreadPooling()
-thread_pooling.thread_pooling_qa(WORKDIR + '/node1/mysql.sock', 'test')
-
+thread_pooling.thread_pooling_qa()
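Editor's note: both thread_pool_qa.py above and galera_basic_sr_qa.py sweep their option matrices with itertools.product, which flattens nested loops over independent option lists into a single loop. A self-contained illustration of the pattern, with values taken from the lists above:

import itertools

thread_handling = ['pool-of-threads', 'one-thread-per-connection']
pool_size = [2]
max_threads = [2, 8]

# product() yields the full cross: 2 * 1 * 2 = 4 combinations
for handling, size, max_thr in itertools.product(thread_handling, pool_size, max_threads):
    my_extra = ("--thread_handling=" + handling +
                " --thread_pool_size=" + str(size) +
                " --thread_pool_max_threads=" + str(max_thr))
    print(my_extra)  # each combination would drive one cluster start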
utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench.sysbench_cleanup(db, thread, thread, SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data cleanup (threads : " + str(thread) + ")") - result = sysbench.sysbench_load(db, thread, thread, SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load (threads : " + str(thread) + ")") - time.sleep(5) - result = utility_cmd.check_table_count(BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: test") + sysbench.test_sanity_check(db) + sysbench.test_sysbench_cleanup(db, thread, thread, SYSBENCH_LOAD_TEST_TABLE_SIZE) + sysbench.test_sysbench_load(db, thread, thread, SYSBENCH_LOAD_TEST_TABLE_SIZE) + time.sleep(60) + utility_cmd.test_table_count(nodes[0], nodes[2], db) -print("-------------------------") -print("\nPXC sysbench load test") -print("------------------------") +utility.test_header("PXC sysbench load test") sysbench_loadtest = SysbenchLoadTest() if SERVER == "pxc": - sysbench_loadtest.start_server(WORKDIR + '/node1/mysql.sock', NODE) - sysbench_loadtest.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + sysbench_loadtest.start_pxc() + sysbench_loadtest.sysbench_run(sysbench_loadtest.pxc_nodes) + sysbench_loadtest.shutdown_nodes() elif SERVER == "ps": - sysbench_loadtest.start_server(PS1_SOCKET, 1) - sysbench_loadtest.sysbench_run(PS1_SOCKET, 'test') - utility_cmd.stop_ps(WORKDIR, BASEDIR, 1) + sysbench_loadtest.set_number_of_nodes(1) + sysbench_loadtest.start_ps() + sysbench_loadtest.sysbench_run(sysbench_loadtest.ps_nodes) + sysbench_loadtest.shutdown_nodes(sysbench_loadtest.ps_nodes) diff --git a/suite/loadtest/sysbench_random_load_test.py b/suite/loadtest/sysbench_random_load_test.py index eeedc31..e54cbc4 100755 --- a/suite/loadtest/sysbench_random_load_test.py +++ b/suite/loadtest/sysbench_random_load_test.py @@ -1,81 +1,50 @@ #!/usr/bin/env python3 import os -import time import sys -import argparse + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * from util import sysbench_run from util import utility from util import table_checksum -# Read argument -parser = argparse.ArgumentParser(prog='PXC sysbench random load test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - +class SysbenchRandomLoadTest(BaseTest): + def __init__(self): + super().__init__(my_extra="--max-connections=1500 --innodb_buffer_pool_size=2G --innodb_log_file_size=1G") -class SysbenchRandomLoadTest: - def start_server(self, socket, node): - if SERVER == "pxc": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) - elif SERVER == "ps": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, socket, USER, 
encryption, my_extra) - - def sysbench_run(self, socket, db): + def sysbench_run(self, nodes: list[DbConnection]): checksum = "" # Sysbench load test - tables = [50, 100, 300, 600, 1000] - threads = [32, 64, 128, 256, 512, 1024] - version = utility_cmd.version_check(BASEDIR) + tables = [50, 1000] # 100, 300, 600 + threads = [32, 1024] # 64, 128, 256, 512 if int(version) < int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) - checksum.sanity_check() - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) - result = sysbench.sanity_check(db) + checksum = table_checksum.TableChecksum(nodes[0], workdir, pt_basedir, debug) + checksum.sanity_check(nodes) + sysbench = sysbench_run.SysbenchRun(nodes[0], debug) + sysbench.test_sanity_check(db) for table_count in tables: - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench.sysbench_cleanup(db, table_count, table_count, SYSBENCH_RANDOM_LOAD_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data cleanup (threads : " + str(table_count) + ")") - result = sysbench.sysbench_load(db, table_count, table_count, SYSBENCH_RANDOM_LOAD_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load (threads : " + str(table_count) + ")") + sysbench.test_sysbench_cleanup(db, table_count, table_count, SYSBENCH_RANDOM_LOAD_TABLE_SIZE) + sysbench.test_sysbench_load(db, table_count, table_count, SYSBENCH_RANDOM_LOAD_TABLE_SIZE) for thread in threads: sysbench.sysbench_oltp_read_write(db, table_count, thread, SYSBENCH_RANDOM_LOAD_TABLE_SIZE, SYSBENCH_RANDOM_LOAD_RUN_TIME) time.sleep(5) - result = utility_cmd.check_table_count(BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: " + db) + utility_cmd.test_table_count(nodes[0], nodes[1], db) + +utility.test_header("PXC sysbench random load test") -print("-------------------------------") -print("\nPXC sysbench random load test") -print("-------------------------------") sysbench_random_loadtest = SysbenchRandomLoadTest() if SERVER == "pxc": - sysbench_random_loadtest.start_server(WORKDIR + '/node1/mysql.sock', NODE) - sysbench_random_loadtest.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + sysbench_random_loadtest.start_pxc() + sysbench_random_loadtest.sysbench_run(sysbench_random_loadtest.pxc_nodes) + sysbench_random_loadtest.shutdown_nodes() elif SERVER == "ps": - sysbench_random_loadtest.start_server(PS1_SOCKET, 1) - sysbench_random_loadtest.sysbench_run(PS1_SOCKET, 'test') - utility_cmd.stop_ps(WORKDIR, BASEDIR, 1) + sysbench_random_loadtest.set_number_of_nodes(1) + sysbench_random_loadtest.start_ps() + sysbench_random_loadtest.sysbench_run(sysbench_random_loadtest.ps_nodes) + sysbench_random_loadtest.shutdown_nodes(sysbench_random_loadtest.ps_nodes) diff --git a/suite/loadtest/sysbench_wsrep_provider_option_random_test.py b/suite/loadtest/sysbench_wsrep_provider_option_random_test.py old mode 100644 new mode 100755 index f624830..cb5aa72 --- a/suite/loadtest/sysbench_wsrep_provider_option_random_test.py +++ b/suite/loadtest/sysbench_wsrep_provider_option_random_test.py @@ -1,90 +1,26 @@ #!/usr/bin/env python3 import os import sys -import argparse import itertools -import time -import subprocess -from datetime import datetime + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * 
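For context on this test: the refactor keeps the core idea of sysbench_wsrep_provider_option_random_test.py, which is to walk every combination of candidate wsrep_provider_options values (built further below with itertools.product) and restart the cluster under each one. A minimal standalone sketch of that expansion follows; the dict keys mirror the ones in this file, while provider_option_combinations is an illustrative helper, not part of the suite:

    import itertools

    # Candidate values per Galera provider option (keys mirror the test below).
    wsrep_provider_options = {
        "gcache.keep_pages_size": [0, 2],
        "gcache.recover": ["yes", "no"],
        "gcache.page_size": ["512M", "1024M"],
        "gcache.size": ["512M", "2048M"],
    }

    def provider_option_combinations(options):
        # Yield one 'key=value;key=value;...' string per combination,
        # the format wsrep_provider_options expects in my.cnf.
        keys = list(options)
        for values in itertools.product(*options.values()):
            yield ";".join(k + "=" + str(v) for k, v in zip(keys, values))

    # 2*2*2*2 = 16 cluster restarts in this sketch.
    for combo in provider_option_combinations(wsrep_provider_options):
        print(combo)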
+from base_test import * from util import sysbench_run from util import utility -from util import db_connection -from util import pxc_startup - - -# Read argument -parser = argparse.ArgumentParser(prog='PXC WSREP provider random test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() +class WSREPProviderRandomTest(BaseTest): + def __init__(self): + super().__init__(my_extra="--max-connections=1500 --innodb_buffer_pool_size=2G --innodb_log_file_size=1G") -class WSREPProviderRandomTest: - def startup_check(self, cluster_node): - """ This method will check the node recovery - startup status. - """ - restart_server = "bash " + WORKDIR + \ - '/log/startup' + str(cluster_node) + '.sh' - os.system(restart_server) - ping_query = BASEDIR + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(120): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - utility_cmd.check_testcase(int(ping_status), "Cluster node restart is successful") - break # break the loop if mysqld is running - - def start_random_test(self, socket, db): - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - dbconnection_check = db_connection.DbConnection(USER, socket) - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.start_cluster(my_extra) - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - # Sysbench load test - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench.sysbench_load(db, 64, 64, SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load") - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + def start_random_test(self): wsrep_provider_options = { - "gcache.keep_pages_size": [0, 1, 2], + "gcache.keep_pages_size": [0, 2], # 1 "gcache.recover": ["yes", "no"], "gcache.page_size": ["512M", "1024M"], - "gcache.size": ["512M", "1024M", "2048M"], + "gcache.size": ["512M", "2048M"], # "1024M" #"repl.commit_order": [0, 1, 2, 3] } @@ -98,46 +34,28 @@ def start_random_test(self, socket, db): print(datetime.now().strftime("%H:%M:%S ") + " WSREP Provider combination(" + wsrep_provider_option + ")") - if encryption == 'YES': - result = 
server_startup.create_config('encryption', wsrep_provider_option) - utility_cmd.check_testcase(result, "Updated configuration file") - else: - result = server_startup.create_config('none', wsrep_provider_option) - utility_cmd.check_testcase(result, "Updated configuration file") + self.set_wsrep_provider_options(wsrep_provider_option) + self.start_pxc() + sysbench = sysbench_run.SysbenchRun(self.node1, debug) + sysbench.test_sanity_check(db) + sysbench.test_sysbench_load(db, use_load_table_size=True) - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection check") - result = sysbench.sysbench_oltp_read_write(db, 64, 64, - SYSBENCH_LOAD_TEST_TABLE_SIZE, 300, 'Yes') - utility_cmd.check_testcase(result, "Sysbench oltp run initiated") - query = 'pidof sysbench' - sysbench_pid = os.popen(query).read().rstrip() + sysbench.test_sysbench_oltp_read_write(db, time=300, background=True, use_load_table_size=True) + sysbench_pid = utility.sysbench_pid() + print("Sysbench pid : " + sysbench_pid) time.sleep(100) - shutdown_node = BASEDIR + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node3/mysql.sock shutdown > /dev/null 2>&1' - if debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node for IST/SST check") - time.sleep(5) - kill_sysbench = "kill -9 " + sysbench_pid - if debug == 'YES': - print("Terminating sysbench run : " + kill_sysbench) - os.system(kill_sysbench) - self.startup_check(3) + self.node2.shutdown() + time.sleep(20) + utility_cmd.kill_process(sysbench_pid, "sysbench", True) + utility_cmd.restart_and_check_node(self.node2) + utility_cmd.wait_for_wsrep_status(self.node2) wsrep_provider_option = '' time.sleep(5) - result = utility_cmd.check_table_count(BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: test") - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + utility_cmd.test_table_count(self.node1, self.node2, db) + self.shutdown_nodes() -print("--------------------------------") -print("\nPXC WSREP provider random test") -print("--------------------------------") +utility.test_header("PXC WSREP provider random test") sysbench_wsrep_provider_random_test = WSREPProviderRandomTest() -sysbench_wsrep_provider_random_test.start_random_test(WORKDIR + '/node1/mysql.sock', 'test') -utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) +sysbench_wsrep_provider_random_test.start_random_test() diff --git a/suite/random_qa/pstress_crash_recovery_qa.py b/suite/random_qa/pstress_crash_recovery_qa.py index bcaf486..870e929 100755 --- a/suite/random_qa/pstress_crash_recovery_qa.py +++ b/suite/random_qa/pstress_crash_recovery_qa.py @@ -1,122 +1,39 @@ #!/usr/bin/env python3 import os -import sys -import argparse -import time -import subprocess -import itertools import random +import sys + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection + +from base_test import * from util import utility -# Read argument -parser = argparse.ArgumentParser(prog='PXC random mysqld option test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', 
'--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() if args.encryption_run is True: - encryption = 'YES' - PSTRESS_EXTRA = "" + pstress_extra = "" else: - encryption = 'NO' - PSTRESS_EXTRA = "--no-encryption" - -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() + pstress_extra = "--no-encryption" -class RandomPstressQA: - def start_pxc(self): - # Start PXC cluster for pstress run - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster('--max-connections=1500') - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - query = BASEDIR + "/bin/mysql --user=root --socket=" + \ - WORKDIR + "/node1/mysql.sock -e'drop database if exists test " \ - "; create database test ;' > /dev/null 2>&1" - if debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - # return 1 - print("ERROR!: Could not create test database.") - exit(1) - def startup_check(self, cluster_node): - """ This method will check the node - startup status. 
- """ - ping_query = BASEDIR + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(120): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - utility_cmd.check_testcase(int(ping_status), "Cluster restart is successful") - break # break the loop if mysqld is running +class RandomPstressQA(BaseTest): + def __init__(self): + super().__init__() - def data_load(self, socket, db): + def data_load(self): # pstress crash recovery qa self.start_pxc() + queries = ["drop database if exists test", "create database test"] + self.node1.execute_queries(queries) n = random.randint(10000, 99999) for i in range(1, 10): - PSTRESS_CMD = PSTRESS_BIN + " --database=" + db + " --threads=50 --logdir=" + \ - WORKDIR + "/log --log-all-queries --log-failed-queries --user=root --socket=" + \ - socket + " --seed " + str(n) + " --tables 25 --records 1000 " + \ - PSTRESS_EXTRA + " --seconds 300 --grammar-file " + \ - PSTRESS_GRAMMAR_FILE + " --step " + str(i) + " > " + \ - WORKDIR + "/log/pstress_run.log" - utility_cmd.check_testcase(0, "PSTRESS RUN command : " + PSTRESS_CMD) - query_status = os.system(PSTRESS_CMD) - if int(query_status) != 0: - utility_cmd.check_testcase(1, "ERROR!: PSTRESS run failed") + utility_cmd.pstress_run(socket=self.node1.get_socket(), db=db, seed=n, step_num=i, + pstress_extra=pstress_extra, workdir=workdir) # kill existing mysqld process - if debug == 'YES': - print("Killing existing mysql process using 'kill -9' command") - os.system("ps -ef | grep '" + WORKDIR + "/conf/node[0-9].cnf' | grep -v grep | " - "awk '{print $2}' | xargs kill -9 >/dev/null 2>&1") - for j in range(1, int(NODE) + 1): - if j == 1: - os.system("sed -i 's#safe_to_bootstrap: 0#safe_to_bootstrap: 1#' " + - WORKDIR + '/node1/grastate.dat') - startup = "bash " + WORKDIR + \ - '/log/startup' + str(j) + '.sh' - if debug == 'YES': - print(startup) - os.system(startup) - self.startup_check(j) + utility_cmd.kill_cluster_nodes() + utility_cmd.restart_cluster(self.pxc_nodes) -print("-----------------------------") -print("PXC Crash Recovery PSTRESS QA") -print("-----------------------------") +utility.test_header("PXC Crash Recovery PSTRESS QA") random_pstress_qa = RandomPstressQA() -if not os.path.isfile(PSTRESS_BIN): - print(PSTRESS_BIN + ' does not exist') - exit(1) -random_pstress_qa.data_load(WORKDIR + '/node1/mysql.sock', 'test') +random_pstress_qa.data_load() diff --git a/suite/random_qa/pstress_random_qa.py b/suite/random_qa/pstress_random_qa.py index a436e79..58fdab3 100755 --- a/suite/random_qa/pstress_random_qa.py +++ b/suite/random_qa/pstress_random_qa.py @@ -1,96 +1,41 @@ #!/usr/bin/env python3 import os import sys -import argparse import itertools + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection + +from base_test import * from util import utility -# Read argument -parser = argparse.ArgumentParser(prog='PXC random mysqld option test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() if 
args.encryption_run is True: - encryption = 'YES' - PSTRESS_EXTRA = "" -else: - encryption = 'NO' - PSTRESS_EXTRA = "--no-encryption" - -if args.debug is True: - debug = 'YES' + pstress_extra = "" else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - + pstress_extra = "--no-encryption" -class RandomPstressQA: - def start_pxc(self): - # Start PXC cluster for pstress run - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster('--max-connections=1500') - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - query = BASEDIR + "/bin/mysql --user=root --socket=" + \ - WORKDIR + "/node1/mysql.sock -e'drop database if exists test " \ - "; create database test ;' > /dev/null 2>&1" - if debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - # return 1 - print("ERROR!: Could not create test database.") - exit(1) - def data_load(self, socket, db): +class RandomPstressQA(BaseTest): + def __init__(self): + super().__init__(my_extra='--max-connections=1500') + def data_load(self): # pstress random load - threads = [16, 64, 512, 1024] - tables = [16, 32, 64, 128] - records = [100, 500, 1000] - seeds = [100, 500, 1000] + threads = [64, 1024] # 512, 16 + tables = [32, 128] # 64, 16 + records = [100, 500, 1000] # 200 + seeds = [1000] # 500, 100 for thread, table, record, seed in \ itertools.product(threads, tables, records, seeds): self.start_pxc() - pstress_cmd = PSTRESS_BIN + " --database=" + db + " --threads=" + str(table) + " --logdir=" + \ - WORKDIR + "/log --log-all-queries --log-failed-queries --user=root --socket=" + \ - socket + " --seed " + str(seed) + " --tables " + str(table) + " " + \ - PSTRESS_EXTRA + " --seconds 300 --grammar-file " + \ - PSTRESS_GRAMMAR_FILE + " --records " + str(record) + "> " + \ - WORKDIR + "/log/pstress_run.log" - utility_cmd.check_testcase(0, "PSTRESS RUN command : " + pstress_cmd) - query_status = os.system(pstress_cmd) - if int(query_status) != 0: - utility_cmd.check_testcase(1, "ERROR!: PSTRESS run is failed") + queries = ["drop database if exists test", "create database test"] + self.node1.execute_queries(queries) + utility_cmd.pstress_run(socket=self.node1.get_socket(), db=db, seed=seed, tables=table, + threads=table, records=record, pstress_extra=pstress_extra, + workdir=workdir) -print("--------------------") -print("PXC Random PSTRESS QA") -print("--------------------") +utility.test_header("PXC Random PSTRESS QA") random_pstress_qa = RandomPstressQA() -if not os.path.isfile(PSTRESS_BIN): - print(PSTRESS_BIN + ' does not exist') - exit(1) -random_pstress_qa.data_load(WORKDIR + '/node1/mysql.sock', 'test') +random_pstress_qa.data_load() diff --git a/suite/random_qa/pxc_util.py b/suite/random_qa/pxc_util.py deleted file mode 100644 index 
87b39d9..0000000 --- a/suite/random_qa/pxc_util.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -import os -import sys -import argparse -import time -import subprocess -import itertools -cwd = os.path.dirname(os.path.realpath(__file__)) -parent_dir = os.path.normpath(os.path.join(cwd, '../../')) -sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection -from util import utility - -# Read argument -parser = argparse.ArgumentParser(prog='PXC Utility', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('--start', action='store_true', - help='Start PXC nodes') -parser.add_argument('--stop', action='store_true', - help='Stop PXC nodes') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - - -class PXCUtil: - def start_pxc(self): - # Start PXC cluster - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - # Check encryption run - if encryption == 'YES': - # Add encryption options - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - # Initialize cluster (create data directory) - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - # Start cluster - result = server_startup.start_cluster('--max-connections=1500 ') - utility_cmd.check_testcase(result, "Cluster startup") - # Check DB connection - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - # Create database test - query = BASEDIR + "/bin/mysql --user=root --socket=" + \ - WORKDIR + "/node1/mysql.sock -e'drop database if exists test " \ - "; create database test ;' > /dev/null 2>&1" - if debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - # return 1 - print("ERROR!: Could not create test database.") - exit(1) - utility_cmd.check_testcase(0, "PXC connection string") - for i in range(1, int(NODE) + 1): - # Print connection string - print('\t' + BASEDIR + '/bin/mysql --user=root --socket=' + - WORKDIR + '/node' + str(i) + '/mysql.sock') - - def stop_pxc(self): - # Stop PXC cluster - for i in range(int(NODE), 0, -1): - shutdown_node = BASEDIR + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(i) + '/mysql.sock shutdown > /dev/null 2>&1' - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "PXC: shutting down cluster node" + str(i)) - - -pxc_util = PXCUtil() -if args.start is True: - # Start Cluster - pxc_util.start_pxc() - -if args.stop is True: - # Stop cluster - pxc_util.stop_pxc() diff --git a/suite/random_qa/random_mysqld_option_test.py b/suite/random_qa/random_mysqld_option_test.py index 456bf98..82998d8 100755 --- 
a/suite/random_qa/random_mysqld_option_test.py
+++ b/suite/random_qa/random_mysqld_option_test.py
@@ -1,110 +1,67 @@
 #!/usr/bin/env python3
 import os
-import sys
-import argparse
 import shutil
+import sys
+
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
-from util import pxc_startup
-from util import db_connection
+from util import pxc_startup, executesql
 from util import sysbench_run
 from util import utility
-from util import createsql
 
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC random mysqld option test', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true',
-                    help='This option will enable debug logging')
-args = parser.parse_args()
-if args.encryption_run is True:
-    encryption = 'YES'
-else:
-    encryption = 'NO'
-if args.debug is True:
-    debug = 'YES'
-else:
-    debug = 'NO'
+conf_file = parent_dir + '/conf/mysql_options_pxc80.txt'
+random_mysql_error_dir = WORKDIR + '/random_mysql_error'
 
-utility_cmd = utility.Utility(debug)
-utility_cmd.check_python_version()
 
+class RandomMySQLDOptionQA(BaseTest):
+    def __init__(self):
+        super().__init__(my_extra='--max-connections=1500')
 
-class RandomMySQLDOptionQA:
-
-    def data_load(self, socket, db):
+    def data_load(self):
         # Sysbench data load
-        sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug)
-        result = sysbench.sanity_check(db)
-        utility_cmd.check_testcase(result, "Sysbench run sanity check")
-        result = sysbench.sysbench_load(db, 10, 10, SYSBENCH_NORMAL_TABLE_SIZE)
-        utility_cmd.check_testcase(result, "Sysbench data load")
+        sysbench = sysbench_run.SysbenchRun(self.node1, debug)
+        sysbench.test_sanity_check(db)
+        sysbench.test_sysbench_load(db, 10, 10, SYSBENCH_NORMAL_TABLE_SIZE)
         # Add prepared statement SQLs
-        create_ps = BASEDIR + "/bin/mysql --user=root --socket=" + \
-                    socket + ' < ' + parent_dir + '/util/prepared_statements.sql > /dev/null 2>&1'
-        result = os.system(create_ps)
-        utility_cmd.check_testcase(result, "Creating prepared statements")
+        self.node1.execute_queries_from_file(parent_dir + '/util/prepared_statements.sql')
+
         # Random data load
-        if os.path.isfile(parent_dir + '/util/createsql.py'):
-            generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000)
-            generate_sql.OutFile()
-            generate_sql.CreateTable()
-            sys.stdout = sys.__stdout__
-            data_load_query = BASEDIR + "/bin/mysql --user=root --socket=" + \
-                              socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1'
-            result = os.system(data_load_query)
-            utility_cmd.check_testcase(result, "Sample data load")
+        if os.path.isfile(parent_dir + '/util/executesql.py'):
+            execute_sql = executesql.GenerateSQL(self.node1, db, 1000)
+            execute_sql.create_table()
 
-print("------------------------------")
-print("PXC Random MySQLD options test")
-print("------------------------------")
-mysql_options = open(parent_dir + '/conf/mysql_options_pxc80.txt')
+utility.test_header("PXC Random mysqld options test")
+mysql_options = open(conf_file)
+if os.path.exists(random_mysql_error_dir):
+    shutil.rmtree(random_mysql_error_dir)
+os.mkdir(random_mysql_error_dir)
+
+i = 0
 for mysql_option in mysql_options:
-    if os.path.exists(WORKDIR + '/random_mysql_error'):
-        os.system('rm -rf ' + WORKDIR + '/random_mysql_error >/dev/null 2>&1')
-        os.mkdir(WORKDIR + '/random_mysql_error')
-    else:
-        os.mkdir(WORKDIR + '/random_mysql_error')
+    i += 1
     random_mysql_option_qa = RandomMySQLDOptionQA()
     # Start PXC cluster for random mysqld options QA
-    dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock')
-    server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug)
-    result = server_startup.sanity_check()
-    utility_cmd.check_testcase(result, "Startup sanity check")
-    if encryption == 'YES':
-        result = server_startup.create_config('encryption')
-        utility_cmd.check_testcase(result, "Configuration file creation")
-    else:
-        result = server_startup.create_config('none')
-        utility_cmd.check_testcase(result, "Configuration file creation")
-
-    cnf_name = open(WORKDIR + '/conf/custom.cnf', 'a+')
-    cnf_name.write('\n')
-    cnf_name.write(mysql_option)
-    cnf_name.close()
-
-    # result = utility_cmd.create_custom_cnf(parent_dir, WORKDIR)
-    utility_cmd.check_testcase(0, "Added random mysqld option: " + mysql_option)
-    result = server_startup.initialize_cluster()
-    utility_cmd.check_testcase(result, "Initializing cluster")
-    result = server_startup.start_cluster('--max-connections=1500')
+    server_startup = pxc_startup.StartCluster(random_mysql_option_qa.get_number_of_nodes(), debug)
+    server_startup.sanity_check()
     option = mysql_option.split('=')[0]
     opt_value = mysql_option.split('=')[1]
-    opt_dir = option + '_' + opt_value
-    if result != 0:
-        os.mkdir(WORKDIR + '/random_mysql_error/' + opt_dir)
-        shutil.copy(WORKDIR + '/conf/custom.cnf', WORKDIR +
-                    '/random_mysql_error/' + opt_dir + '/custom.cnf')
-        shutil.copytree(WORKDIR + '/log', WORKDIR + '/random_mysql_error/' + opt_dir + '/log')
+    custom_conf_settings = {option: opt_value}
+    random_mysql_option_qa.start_pxc(custom_conf_settings=custom_conf_settings, terminate_on_startup_failure=False)
+
+    opt_dir = random_mysql_error_dir + '/' + option + '_' + opt_value
+    if len(random_mysql_option_qa.pxc_nodes) != random_mysql_option_qa.get_number_of_nodes():
+        if os.path.exists(opt_dir):
+            shutil.rmtree(opt_dir)
+        shutil.copytree(WORKDIR + '/log', opt_dir)
        continue
-    utility_cmd.check_testcase(result, "Cluster startup", "Not terminate")
-    result = dbconnection_check.connection_check()
-    utility_cmd.check_testcase(result, "Database connection")
-    random_mysql_option_qa.data_load(WORKDIR + '/node1/mysql.sock', 'test')
-    utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE)
+    random_mysql_option_qa.data_load()
+    random_mysql_option_qa.shutdown_nodes()
+    if i == 7:
+        print("Successfully tested cluster with seven mysqld options")
+        break
 mysql_options.close()
diff --git a/suite/replication/backup_replication.py b/suite/replication/backup_replication.py
index dce8550..69e872d 100755
--- a/suite/replication/backup_replication.py
+++ b/suite/replication/backup_replication.py
@@ -1,73 +1,21 @@
 #!/usr/bin/env python3
 import os
 import sys
-import argparse
-import shutil
 
 cwd = os.path.dirname(os.path.realpath(__file__))
 parent_dir = os.path.normpath(os.path.join(cwd, '../../'))
 sys.path.insert(0, parent_dir)
+from base_test import *
 from config import *
-from util import pxc_startup
-from util import db_connection
+from util import executesql
 from util import sysbench_run
-from util import ps_startup
 from util import utility
-from util import createsql
-
-# Read argument
-parser = argparse.ArgumentParser(prog='PXC replication test using PXB', usage='%(prog)s [options]')
-parser.add_argument('-e', '--encryption-run', action='store_true',
-                    help='This option will enable encryption options')
-parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() -class SetupReplication: - def __init__(self, basedir, workdir, node): - self.basedir = basedir - self.workdir = workdir - self.node = node - - def start_pxc(self, my_extra=None): - """ Start Percona XtraDB Cluster. This method will - perform sanity checks for cluster startup - :param my_extra: We can pass extra PXC startup option - with this parameter - """ - # Start PXC cluster for replication test - if my_extra is None: - my_extra = '' +class SetupReplication(BaseTest): + def __init__(self): script_dir = os.path.dirname(os.path.realpath(__file__)) - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(self.node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PXC: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PXC: Initializing cluster") - result = server_startup.add_myextra_configuration(script_dir + '/replication.cnf') - utility_cmd.check_testcase(result, "PXC: Adding custom configuration") - result = server_startup.start_cluster(my_extra) - utility_cmd.check_testcase(result, "PXC: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PXC: Database connection") + super().__init__(extra_config_file=script_dir + '/replication.cnf') def backup_pxc_node(self): """ Backup Cluster node using @@ -75,101 +23,47 @@ def backup_pxc_node(self): This method will also do sanity check before backup """ - utility_cmd.pxb_sanity_check(BASEDIR, WORKDIR, WORKDIR + '/node1/mysql.sock') - if os.path.exists(WORKDIR + '/psnode1'): - shutil.rmtree(WORKDIR + '/psnode1') - utility_cmd.pxb_backup(WORKDIR, WORKDIR + '/node1', WORKDIR + '/node1/mysql.sock', - encryption, WORKDIR + '/psnode1') + pxc_startup.StartCluster.pxb_sanity_check(self.node1, version) + return pxc_startup.StartCluster.pxb_backup(self.node1, encryption, True, debug) - def start_slave(self, node, my_extra=None): - """ Start Percona Server. 
This method will - perform sanity checks for PS startup - :param my_extra: We can pass extra PS startup - option with this parameter - """ - if my_extra is None: - my_extra = '' - # Start PXC cluster for replication test - script_dir = os.path.dirname(os.path.realpath(__file__)) - dbconnection_check = db_connection.DbConnection(USER, PS1_SOCKET) - server_startup = ps_startup.StartPerconaServer(parent_dir, WORKDIR, BASEDIR, int(node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PS: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PS: Configuration file creation") - else: - result = server_startup.create_config() - utility_cmd.check_testcase(result, "PS: Configuration file creation") - result = server_startup.add_myextra_configuration(script_dir + '/replication.cnf') - utility_cmd.check_testcase(result, "PS: Adding custom configuration") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PS: Initializing PS server") - result = server_startup.start_server(my_extra) - utility_cmd.check_testcase(result, "PS: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PS: Database connection") - - def sysbench_run(self, socket, db, node): + def sysbench_run(self, test_db): # Sysbench data load - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) + sysbench = sysbench_run.SysbenchRun(self.node1, debug) - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench run sanity check") - result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench data load") + sysbench.test_sanity_check(test_db) + sysbench.test_sysbench_load(test_db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) if encryption == 'YES': for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - encrypt_table = BASEDIR + '/bin/mysql --user=root ' \ - '--socket=' + socket + ' -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) + self.node1.execute('alter table ' + test_db + '.sbtest' + str(i) + " encryption='Y'") - def data_load(self, db, socket, node): + def data_load(self, test_db): + queries = ["drop database if exists " + test_db, "create database " + test_db] + self.node1.execute_queries(queries) + utility_cmd.check_testcase(0, "PXC : Replication QA sample DB creation") # Random data load - if os.path.isfile(parent_dir + '/util/createsql.py'): - generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000) - generate_sql.OutFile() - generate_sql.CreateTable() - sys.stdout = sys.__stdout__ - create_db = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"drop database if exists ' + db + \ - ';create database ' + db + ';" 2>&1' - if debug == 'YES': - print(create_db) - result = os.system(create_db) - utility_cmd.check_testcase(result, node + ": Replication QA sample DB creation") - data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1' - if debug == 'YES': - print(data_load_query) - result = os.system(data_load_query) - utility_cmd.check_testcase(result, node + ": Replication QA sample data load") + if 
os.path.isfile(parent_dir + '/util/executesql.py'): + execute_sql = executesql.GenerateSQL(self.node1, test_db, 1000) + execute_sql.create_table() + utility_cmd.check_testcase(0, "PXC : Replication QA sample data load") + # Add prepared statement SQLs - create_ps = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' < ' + parent_dir + '/util/prepared_statements.sql > /dev/null 2>&1' - if debug == 'YES': - print(create_ps) - result = os.system(create_ps) - utility_cmd.check_testcase(result, node + ": Replication QA prepared statements dataload") + self.node1.execute_queries_from_file(parent_dir + '/util/prepared_statements.sql') + utility_cmd.check_testcase(0, "PXC : Replication QA prepared statements dataload") -replication_run = SetupReplication(BASEDIR, WORKDIR, NODE) -print("\nSetup replication using Percona Xtrabackup") -print("------------------------------------------") +utility.test_header("Setup replication using Percona Xtrabackup") +replication_run = SetupReplication() replication_run.start_pxc() -replication_run.sysbench_run(WORKDIR + '/node1/mysql.sock', 'pxcdb', 'PXC') -replication_run.data_load('pxc_dataload_db', WORKDIR + '/node1/mysql.sock', 'PXC') -replication_run.backup_pxc_node() -replication_run.start_slave('1') -utility_cmd.invoke_replication(BASEDIR, WORKDIR + '/node1/mysql.sock', PS1_SOCKET, 'backup_slave', 'none') -utility_cmd.replication_io_status(BASEDIR, PS1_SOCKET, 'PS', 'none') -utility_cmd.replication_sql_status(BASEDIR, PS1_SOCKET, 'PS', 'none') +replication_run.sysbench_run('pxcdb') +replication_run.data_load('pxc_dataload_db') +backup_dir = replication_run.backup_pxc_node() +replication_run.set_number_of_nodes(1) +replication_run.start_ps() +ps_node_1 = replication_run.ps_nodes[0] +utility_cmd.invoke_replication(replication_run.node1, ps_node_1, utility.RplType.BACKUP_REPLICA, + backup_dir=backup_dir) +utility_cmd.replication_io_status(ps_node_1, version) +utility_cmd.replication_sql_status(ps_node_1, version) -utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) -utility_cmd.stop_ps(WORKDIR, BASEDIR, '1') +replication_run.shutdown_nodes() +replication_run.shutdown_nodes(replication_run.ps_nodes) \ No newline at end of file diff --git a/suite/replication/gtid_replication.py b/suite/replication/gtid_replication.py index 689ba86..1713a43 100755 --- a/suite/replication/gtid_replication.py +++ b/suite/replication/gtid_replication.py @@ -1,205 +1,31 @@ #!/usr/bin/env python3 import os import sys -import argparse + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection -from util import sysbench_run -from util import ps_startup +from base_test import * +from suite.replication.replication import SetupReplication from util import utility -from util import createsql -from util import rqg_datagen - -# Read argument -parser = argparse.ArgumentParser(prog='PXC GTID replication test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - -version = 
utility_cmd.version_check(BASEDIR) - -class SetupReplication: - def __init__(self, basedir, workdir, node): - self.basedir = basedir - self.workdir = workdir - self.node = node - - def start_pxc(self, my_extra=None): - """ Start Percona XtraDB Cluster. This method will - perform sanity checks for cluster startup - :param my_extra: We can pass extra PXC startup option - with this parameter - """ - # Start PXC cluster for replication test - if my_extra is None: - my_extra = '' - script_dir = os.path.dirname(os.path.realpath(__file__)) - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(self.node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PXC: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PXC: Initializing cluster") - result = server_startup.add_myextra_configuration(cwd + '/gtid_replication.cnf') - utility_cmd.check_testcase(result, "PXC: Adding custom configuration") - result = server_startup.start_cluster(my_extra) - utility_cmd.check_testcase(result, "PXC: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PXC: Database connection") - - def start_ps(self, node, my_extra=None): - """ Start Percona Server. This method will - perform sanity checks for PS startup - :param my_extra: We can pass extra PS startup - option with this parameter - """ - if my_extra is None: - my_extra = '' - # Start PXC cluster for replication test - script_dir = os.path.dirname(os.path.realpath(__file__)) - dbconnection_check = db_connection.DbConnection(USER, PS1_SOCKET) - server_startup = ps_startup.StartPerconaServer(parent_dir, WORKDIR, BASEDIR, int(node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PS: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PS: Configuration file creation") - else: - result = server_startup.create_config() - utility_cmd.check_testcase(result, "PS: Configuration file creation") - result = server_startup.add_myextra_configuration(cwd + '/gtid_replication.cnf') - utility_cmd.check_testcase(result, "PS: Adding custom configuration") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PS: Initializing cluster") - result = server_startup.start_server(my_extra) - utility_cmd.check_testcase(result, "PS: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PS: Database connection") - - def sysbench_run(self, socket, db, node): - # Sysbench data load - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) - - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench run sanity check") - result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench data load") - if encryption == 'YES': - for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - encrypt_table = 
BASEDIR + '/bin/mysql --user=root ' \ - '--socket=' + socket + ' -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) - - def data_load(self, db, socket, node): - # Random data load - if os.path.isfile(parent_dir + '/util/createsql.py'): - generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000) - generate_sql.OutFile() - generate_sql.CreateTable() - sys.stdout = sys.__stdout__ - create_db = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"drop database if exists ' + db + \ - ';create database ' + db + ';" 2>&1' - if debug == 'YES': - print(create_db) - result = os.system(create_db) - utility_cmd.check_testcase(result, node + ": Replication QA sample DB creation") - data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1' - if debug == 'YES': - print(data_load_query) - result = os.system(data_load_query) - utility_cmd.check_testcase(result, node + ": Replication QA sample data load") - # Add prepared statement SQLs - create_ps = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' < ' + parent_dir + '/util/prepared_statements.sql > /dev/null 2>&1' - if debug == 'YES': - print(create_ps) - result = os.system(create_ps) - utility_cmd.check_testcase(result, node + ": Replication QA prepared statements dataload") - - def replication_testcase(self, ps_node, master, slave, comment, master_socket, slave_socket): - if comment == "mtr": - self.start_pxc('--slave-parallel-workers=5') - self.start_ps(ps_node, '--slave-parallel-workers=5') - comment = 'none' - else: - self.start_pxc() - self.start_ps(ps_node) - if comment == "msr": - utility_cmd.invoke_replication(BASEDIR, PS1_SOCKET, - slave_socket, 'GTID', "for channel 'master1'") - utility_cmd.invoke_replication(BASEDIR, PS2_SOCKET, - slave_socket, 'GTID', "for channel 'master2'") - else: - utility_cmd.invoke_replication(BASEDIR, master_socket, - slave_socket, 'GTID', comment) - - replication_run.sysbench_run(master_socket, 'sbtest', master) - replication_run.data_load('ps_dataload_db', master_socket, master) - rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug) - rqg_dataload.pxc_dataload(master_socket) - if comment == "msr": - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, 'master1') - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, 'master1') - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, 'master2') - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, 'master2') - else: - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, comment) - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, comment) - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) - utility_cmd.stop_ps(WORKDIR, BASEDIR, ps_node) +class SetupGtidReplication(SetupReplication): + def __init__(self): + super().__init__(rpl_type=utility.RplType.GTID) -replication_run = SetupReplication(BASEDIR, WORKDIR, NODE) +if __name__ == '__main__': -print("\nGTID PXC Node as Master and PS node as Slave") -print("----------------------------------------------") -replication_run.replication_testcase('1', 'PXC', 'PS', 'none', - WORKDIR + '/node1/mysql.sock', PS1_SOCKET) -print("\nGTID PXC Node as Slave and PS node as Master") -print("----------------------------------------------") -replication_run.replication_testcase('1', 'PS', 'PXC', 'none', PS1_SOCKET, - 
WORKDIR + '/node1/mysql.sock') + gtid_replication_run = SetupGtidReplication() -if int(version) > int("050700"): - print("\nGTID PXC multi source replication") - print("-----------------------------------") - replication_run.replication_testcase('2', 'PS', 'PXC', 'msr', PS1_SOCKET, - WORKDIR + '/node1/mysql.sock') - print("\nGTID PXC multi thread replication") - print("-----------------------------------") - replication_run.replication_testcase('1', 'PS', 'PXC', 'mtr', PS1_SOCKET, - WORKDIR + '/node1/mysql.sock') + utility.test_header("GTID PXC->PS async replication") + gtid_replication_run.replication_testcase(is_pxc_source=True) + utility.test_header("GTID PS->PXC async replication") + gtid_replication_run.replication_testcase() + if int(version) > int("050700"): + utility.test_header("GTID PS1->PXC, PS2->PXC Multi source replication") + gtid_replication_run.replication_testcase(2, comment='msr') + utility.test_header("GTID PS->PXC multi threaded async replication") + gtid_replication_run.replication_testcase(comment='mta') diff --git a/suite/replication/replication.py b/suite/replication/replication.py index 2c06eae..d8d567e 100755 --- a/suite/replication/replication.py +++ b/suite/replication/replication.py @@ -1,203 +1,106 @@ #!/usr/bin/env python3 import os import sys -import configparser -import argparse + + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * -from util import pxc_startup -from util import db_connection +from util import executesql from util import sysbench_run -from util import ps_startup from util import utility -from util import createsql from util import rqg_datagen -# Read argument -parser = argparse.ArgumentParser(prog='PXC replication test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - -version = utility_cmd.version_check(BASEDIR) - - -class SetupReplication: - def __init__(self, basedir, workdir, node): - self.basedir = basedir - self.workdir = workdir - self.node = node - - def start_pxc(self, my_extra=None): - """ Start Percona XtraDB Cluster. 
This method will - perform sanity checks for cluster startup - :param my_extra: We can pass extra PXC startup option - with this parameter - """ - # Start PXC cluster for replication test - if my_extra is None: - my_extra = '' - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(self.node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PXC: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "PXC: Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PXC: Initializing cluster") - result = server_startup.add_myextra_configuration(cwd + '/replication.cnf') - utility_cmd.check_testcase(result, "PXC: Adding custom configuration") - result = server_startup.start_cluster(my_extra) - utility_cmd.check_testcase(result, "PXC: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PXC: Database connection") - - def start_ps(self, node, my_extra=None): - """ Start Percona Server. This method will - perform sanity checks for PS startup - :param my_extra: We can pass extra PS startup - option with this parameter - """ - if my_extra is None: - my_extra = '' - # Start PXC cluster for replication test - dbconnection_check = db_connection.DbConnection(USER, PS1_SOCKET) - server_startup = ps_startup.StartPerconaServer(parent_dir, WORKDIR, BASEDIR, int(node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PS: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PS: Configuration file creation") + +class SetupReplication(BaseTest): + def __init__(self, rpl_type=utility.RplType.GTID_LESS): + if rpl_type == utility.RplType.GTID: + extra_conf_file = cwd + '/gtid_replication.cnf' else: - result = server_startup.create_config() - utility_cmd.check_testcase(result, "PS: Configuration file creation") - result = server_startup.add_myextra_configuration(cwd + '/replication.cnf') - utility_cmd.check_testcase(result, "PS: Adding custom configuration") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PS: Initializing cluster") - result = server_startup.start_server(my_extra) - utility_cmd.check_testcase(result, "PS: Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PS: Database connection") - - def sysbench_run(self, socket, db, node): + extra_conf_file = cwd + '/replication.cnf' + super().__init__(extra_config_file=extra_conf_file) + self.rpl_type = rpl_type + + @staticmethod + def sysbench_run(node: DbConnection, test_db): # Sysbench data load - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) + sysbench = sysbench_run.SysbenchRun(node, debug) - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench run sanity check") - result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, node + ": Replication QA sysbench data load") + sysbench.test_sanity_check(test_db) + 
sysbench.test_sysbench_load(test_db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) if encryption == 'YES': for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - encrypt_table = BASEDIR + '/bin/mysql --user=root ' \ - '--socket=' + socket + ' -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) - - def data_load(self, db, socket, node): + node.execute('alter table ' + test_db + '.sbtest' + str(i) + " encryption='Y'") + + @staticmethod + def data_load(node: DbConnection, test_db): # Random data load - if os.path.isfile(parent_dir + '/util/createsql.py'): - generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000) - generate_sql.OutFile() - generate_sql.CreateTable() - sys.stdout = sys.__stdout__ - create_db = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"drop database if exists ' + db + \ - ';create database ' + db + ';" 2>&1' - if debug == 'YES': - print(create_db) - result = os.system(create_db) - utility_cmd.check_testcase(result, node + ": Replication QA sample DB creation") - data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1' - if debug == 'YES': - print(data_load_query) - result = os.system(data_load_query) - utility_cmd.check_testcase(result, node + ": Replication QA sample data load") - # Add prepared statement SQLs - create_ps = self.basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' < ' + parent_dir + '/util/prepared_statements.sql > /dev/null 2>&1' - if debug == 'YES': - print(create_ps) - result = os.system(create_ps) - utility_cmd.check_testcase(result, node + ": Replication QA prepared statements dataload") - - def replication_testcase(self, ps_node, master, slave, comment, master_socket, slave_socket): - if comment == "mtr": - self.start_pxc('--slave-parallel-workers=5') - self.start_ps(ps_node, '--slave-parallel-workers=5') - comment = 'none' + queries = ["drop database if exists " + test_db, "create database " + test_db] + node.execute_queries(queries) + utility_cmd.check_testcase(0, "Replication QA sample DB creation") + + if os.path.isfile(parent_dir + '/util/executesql.py'): + execute_sql = executesql.GenerateSQL(node, test_db, 1000) + execute_sql.create_table() + utility_cmd.check_testcase(0, "Replication QA sample data load") + + node.execute_queries_from_file(parent_dir + '/util/prepared_statements.sql') + utility_cmd.check_testcase(0, "Replication QA prepared statements dataload") + + def replication_testcase(self, number_of_ps_nodes: int = 1, is_pxc_source: bool = False, comment: str = None): + if comment == "mta": + my_extra = ' --slave-parallel-workers=5' else: - self.start_pxc() - self.start_ps(ps_node) + my_extra = None + self.start_pxc(my_extra) + number_of_nodes = self.get_number_of_nodes() + self.set_number_of_nodes(number_of_ps_nodes) + self.start_ps(my_extra) + self.set_number_of_nodes(number_of_nodes) + + source_node = self.ps_nodes[0] + replica_node = self.node1 + + if is_pxc_source: + temp = source_node + source_node = replica_node + replica_node = temp + if comment == "msr": - utility_cmd.invoke_replication(BASEDIR, PS1_SOCKET, - slave_socket, 'NONGTID', "for channel 'master1'") - utility_cmd.invoke_replication(BASEDIR, PS2_SOCKET, - slave_socket, 'NONGTID', "for channel 'master2'") + utility_cmd.invoke_replication(source_node, replica_node, self.rpl_type, 'channel1') + 
utility_cmd.invoke_replication(self.ps_nodes[1], replica_node, self.rpl_type, 'channel2') else: - utility_cmd.invoke_replication(BASEDIR, master_socket, - slave_socket, 'NONGTID', comment) + utility_cmd.invoke_replication(source_node, replica_node, self.rpl_type) - replication_run.sysbench_run(master_socket, 'sbtest', master) - replication_run.data_load('ps_dataload_db', master_socket, master) - rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug) - rqg_dataload.pxc_dataload(master_socket) + self.sysbench_run(source_node, 'sbtest') + self.data_load(source_node, 'ps_dataload_db') + rqg_dataload = rqg_datagen.RQGDataGen(source_node, debug) + rqg_dataload.pxc_dataload(workdir) if comment == "msr": - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, 'master1') - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, 'master1') - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, 'master2') - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, 'master2') + utility_cmd.replication_io_status(replica_node, version, 'channel1') + utility_cmd.replication_sql_status(replica_node, version, 'channel1') + utility_cmd.replication_io_status(replica_node, version, 'channel2') + utility_cmd.replication_sql_status(replica_node, version, 'channel2') else: - utility_cmd.replication_io_status(BASEDIR, slave_socket, slave, comment) - utility_cmd.replication_sql_status(BASEDIR, slave_socket, slave, comment) - - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) - utility_cmd.stop_ps(WORKDIR, BASEDIR, ps_node) - - -replication_run = SetupReplication(BASEDIR, WORKDIR, NODE) -print("\nNON-GTID PXC Node as Master and PS node as Slave") -print("----------------------------------------------") -replication_run.replication_testcase('1', 'PXC', 'PS', 'none', - WORKDIR + '/node1/mysql.sock', PS1_SOCKET) -print("\nNON-GTID PXC Node as Slave and PS node as Master") -print("----------------------------------------------") -replication_run.replication_testcase('1', 'PS', 'PXC', 'none', PS1_SOCKET, - WORKDIR + '/node1/mysql.sock') - -if int(version) > int("050700"): - print("\nNON-GTID PXC multi source replication") - print("-----------------------------------") - replication_run.replication_testcase('2', 'PS', 'PXC', 'msr', PS1_SOCKET, - WORKDIR + '/node1/mysql.sock') - print("\nNON-GTID PXC multi thread replication") - print("-----------------------------------") - replication_run.replication_testcase('1', 'PS', 'PXC', 'mtr', PS1_SOCKET, - WORKDIR + '/node1/mysql.sock') + utility_cmd.replication_io_status(replica_node, version) + utility_cmd.replication_sql_status(replica_node, version) + + self.shutdown_nodes() + self.shutdown_nodes(self.ps_nodes) + + +if __name__ == '__main__': + replication_run = SetupReplication() + utility.test_header("NON-GTID PXC->PS async replication") + replication_run.replication_testcase(is_pxc_source=True) + utility.test_header("NON-GTID PS->PXC async replication") + replication_run.replication_testcase() + + if int(version) > int("050700"): + utility.test_header("NON-GTID PS1->PXC, PS2->PXC Multi source replication") + replication_run.replication_testcase(2, comment='msr') + utility.test_header("NON-GTID PS->PXC multi threaded async replication") + replication_run.replication_testcase(comment='mta') diff --git a/suite/ssl/encryption_qa.py b/suite/ssl/encryption_qa.py old mode 100644 new mode 100755 index 8eb2dd0..5af17c8 --- a/suite/ssl/encryption_qa.py +++ b/suite/ssl/encryption_qa.py @@ -2,144 +2,93 @@ import os import sys import 
itertools -import argparse + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * -from util import sysbench_run +from util import sysbench_run, executesql from util import utility from util import table_checksum from util import rqg_datagen from util import pxc_startup -from util import db_connection -from util import createsql - -# Read argument -parser = argparse.ArgumentParser(prog='PXC replication test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - - -class EncryptionTest: - def sysbench_run(self, socket, db): + + +class EncryptionTest(BaseTest): + def __init__(self): + super().__init__(encrypt=True) + + def sysbench_run(self): # Sysbench data load - version = utility_cmd.version_check(BASEDIR) - checksum = "" if int(version) < int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) - checksum.sanity_check() - - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, socket, debug) - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load (threads : " + str(SYSBENCH_THREADS) + ")") + checksum = table_checksum.TableChecksum(self.node1, workdir, pt_basedir, debug) + checksum.sanity_check(self.pxc_nodes) + + sysbench = sysbench_run.SysbenchRun(self.node1, debug) + sysbench.test_sanity_check(db) + sysbench.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_LOAD_TEST_TABLE_SIZE) sysbench.sysbench_ts_encryption(db, SYSBENCH_THREADS) def encryption_qa(self): # Encryption QA # Create data insert procedure - rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug) - - encryption_tmp_ts = ['innodb_temp_tablespace_encrypt=ON', 'innodb_temp_tablespace_encrypt=OFF'] - encryption_bin_log = ['binlog_encryption=ON', 'binlog_encryption=OFF'] - encryption_default_tbl = ['default_table_encryption=ON', 'default_table_encryption=OFF'] - encryption_redo_log = ['innodb_redo_log_encrypt=ON', 'innodb_redo_log_encrypt=OFF'] - encryption_undo_log = ['innodb_undo_log_encrypt=ON', 'innodb_undo_log_encrypt=OFF'] - encryption_sys_ts = ['innodb_sys_tablespace_encrypt=ON', 'innodb_sys_tablespace_encrypt=OFF'] - - for encryption_tmp_ts_value, encryption_bin_log_value, encryption_default_tbl_value, \ - encryption_redo_log_value, encryption_undo_log_value, encryption_sys_ts_value in \ - itertools.product(encryption_tmp_ts, encryption_bin_log, encryption_default_tbl, - encryption_redo_log, encryption_undo_log, encryption_sys_ts): - encryption_combination = encryption_tmp_ts_value + " " + encryption_bin_log_value + \ - " " + encryption_default_tbl_value + " " + encryption_redo_log_value + \ - " " + encryption_undo_log_value + " " + encryption_sys_ts_value - utility_cmd.check_testcase(0, "Encryption options : " + encryption_combination) + + option_values = ['ON', 'OFF'] + + loop_num = 0 + for val1, val2, val3, val4, val5, val6 in \ 
+ itertools.product(option_values, repeat=6): # Start PXC cluster for encryption test - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - cnf_name = open(WORKDIR + '/conf/random_encryption.cnf', 'w+') - cnf_name.write('[mysqld]\n') - cnf_name.write("early-plugin-load=keyring_file.so" + '\n') - cnf_name.write("keyring_file_data=keyring" + '\n') - cnf_name.write(encryption_tmp_ts_value + '\n') - cnf_name.write(encryption_bin_log_value + '\n') - cnf_name.write(encryption_default_tbl_value + '\n') - cnf_name.write(encryption_redo_log_value + '\n') - cnf_name.write(encryption_undo_log_value + '\n') - cnf_name.write(encryption_sys_ts_value + '\n') - cnf_name.close() - for i in range(1, int(NODE) + 1): - os.system("sed -i 's#pxc_encrypt_cluster_traffic = OFF#pxc_encrypt_cluster_traffic = ON#g' " + - WORKDIR + '/conf/node' + str(i) + '.cnf') - n_name = open(WORKDIR + '/conf/node' + str(i) + '.cnf', 'a+') - n_name.write('!include ' + WORKDIR + '/conf/random_encryption.cnf\n') - n_name.close() - - if encryption_sys_ts_value == "innodb_sys_tablespace_encrypt=ON": - init_extra = "--innodb_sys_tablespace_encrypt=ON " \ - "--early-plugin-load=keyring_file.so " \ - " --keyring_file_data=keyring" - result = server_startup.initialize_cluster(init_extra) + server_startup = pxc_startup.StartCluster(self.get_number_of_nodes(), debug) + server_startup.sanity_check() + + options = {"default_table_encryption": val1, + "innodb_temp_tablespace_encrypt": val2, + "innodb_sys_tablespace_encrypt": val3, + "innodb_redo_log_encrypt": val4, + "innodb_undo_log_encrypt": val5, + "binlog_encryption": val6, + "early-plugin-load": "keyring_file.so", + "keyring_file_data": "keyring", + "encrypt_tmp_files": "ON"} + + server_startup.create_config('encryption', custom_conf_settings=options, + default_encryption_conf=False) + + if options['innodb_sys_tablespace_encrypt'] == 'ON': + init_extra = ("--innodb_sys_tablespace_encrypt=ON --early-plugin-load=keyring_file.so " + "--keyring_file_data=keyring") + server_startup.initialize_cluster(init_extra) else: - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - self.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock') + server_startup.initialize_cluster() + self.pxc_nodes = server_startup.start_cluster() + self.node1 = self.pxc_nodes[0] + self.node2 = self.pxc_nodes[1] + self.node1.test_connection_check() + self.sysbench_run() + rqg_dataload = rqg_datagen.RQGDataGen(self.node1, debug) + rqg_dataload.pxc_dataload(workdir) # Add prepared statement SQLs - create_ps = BASEDIR + "/bin/mysql --user=root --socket=" + \ - WORKDIR + '/node1/mysql.sock' + ' < ' + parent_dir + \ - '/util/prepared_statements.sql > /dev/null 2>&1' - if debug == 'YES': - print(create_ps) - result = os.system(create_ps) - utility_cmd.check_testcase(result, "Creating prepared statements") + self.node1.execute_queries_from_file(parent_dir + 
'/util/prepared_statements.sql') # Random data load - if os.path.isfile(parent_dir + '/util/createsql.py'): - generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000) - generate_sql.OutFile() - generate_sql.CreateTable() + if os.path.isfile(parent_dir + '/util/executesql.py'): + execute_sql = executesql.GenerateSQL(self.node1, db, 1000) + execute_sql.create_table() sys.stdout = sys.__stdout__ - data_load_query = BASEDIR + "/bin/mysql --user=root --socket=" + \ - WORKDIR + '/node1/mysql.sock' + ' test -f < /tmp/dataload.sql >/dev/null 2>&1' - if debug == 'YES': - print(data_load_query) - result = os.system(data_load_query) - utility_cmd.check_testcase(result, "Sample data load") # Checksum for tables in test DB for 8.0. - version = utility_cmd.version_check(BASEDIR) if int(version) >= int("080000"): - result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: test") + utility_cmd.test_table_count(self.node1, self.node2, db) - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + self.shutdown_nodes() + loop_num += 1 + if loop_num == 6: + print("Successfully tested six combinations") + break -print("-----------------------") -print("PXC Encryption test") -print("-----------------------") +utility.test_header("PXC Encryption test") encryption_test = EncryptionTest() encryption_test.encryption_qa() diff --git a/suite/ssl/ssl_qa.py b/suite/ssl/ssl_qa.py index e93abad..ec77e71 100755 --- a/suite/ssl/ssl_qa.py +++ b/suite/ssl/ssl_qa.py @@ -1,135 +1,52 @@ #!/usr/bin/env python3 import os import sys -import argparse + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * -from util import pxc_startup -from util import db_connection +from util import executesql from util import sysbench_run from util import utility -from util import createsql from util import rqg_datagen from util import table_checksum -# Read argument -parser = argparse.ArgumentParser(prog='PXC SSL test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - -class SSLCheck: - def __init__(self, basedir, workdir, user, node1_socket, node): - self.workdir = workdir - self.basedir = basedir - self.user = user - self.socket = node1_socket - self.node = node +class SSLCheck(BaseTest): + def __init__(self): + super().__init__(ssl=True) - def run_query(self, query): - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR! 
Query execution failed: " + query) - return 1 - return 0 + def sysbench_run(self, test_db): + sysbench = sysbench_run.SysbenchRun(self.node1, debug) - def start_pxc(self): - # Start PXC cluster for SSL test - dbconnection_check = db_connection.DbConnection(USER, self.socket) - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, BASEDIR, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('ssl') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - - def sysbench_run(self, node1_socket, db): - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - WORKDIR + '/node1/mysql.sock', debug) - - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "SSL QA sysbench run sanity check") - result = sysbench.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, "SSL QA sysbench data load") + sysbench.test_sanity_check(test_db) + sysbench.test_sysbench_load(test_db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) if encryption == 'YES': for i in range(1, int(SYSBENCH_THREADS) + 1): - encrypt_table = BASEDIR + '/bin/mysql --user=root ' \ - '--socket=/tmp/node1.sock -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) + self.node1.execute(' alter table ' + test_db + '.sbtest' + str(i) + " encryption='Y'") - def data_load(self, db, node1_socket): - if os.path.isfile(parent_dir + '/util/createsql.py'): - generate_sql = createsql.GenerateSQL('/tmp/dataload.sql', 1000) - generate_sql.OutFile() - generate_sql.CreateTable() - sys.stdout = sys.__stdout__ - create_db = self.basedir + "/bin/mysql --user=root --socket=" + \ - node1_socket + ' -Bse"drop database if exists ' + db + \ - ';create database ' + db + ';" 2>&1' - if debug == 'YES': - print(create_db) - result = os.system(create_db) - utility_cmd.check_testcase(result, "SSL QA sample DB creation") - data_load_query = self.basedir + "/bin/mysql --user=root --socket=" + \ - node1_socket + ' ' + db + ' -f < /tmp/dataload.sql >/dev/null 2>&1' - if debug == 'YES': - print(data_load_query) - result = os.system(data_load_query) - utility_cmd.check_testcase(result, "SSL QA sample data load") + def data_load(self, test_db): + queries = ['drop database if exists ' + test_db, 'create database ' + test_db] + self.node1.execute_queries(queries) + if os.path.isfile(parent_dir + '/util/executesql.py'): + execute_sql = executesql.GenerateSQL(self.node1, test_db, 1000) + execute_sql.create_table() + utility_cmd.check_testcase(0, "SSL QA sample data load") -print("\nPXC SSL test") -print("--------------") -ssl_run = SSLCheck(BASEDIR, WORKDIR, USER, WORKDIR + '/node1/mysql.sock', NODE) +utility.test_header("PXC SSL test") +ssl_run = SSLCheck() ssl_run.start_pxc() -ssl_run.sysbench_run(WORKDIR + '/node1/mysql.sock', 'sbtest') 
-ssl_run.data_load('pxc_dataload_db', WORKDIR + '/node1/mysql.sock') -version = utility_cmd.version_check(BASEDIR) -if int(version) > int("050700"): - rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, 'rqg_test', debug) -else: - rqg_dataload = rqg_datagen.RQGDataGen(BASEDIR, WORKDIR, USER, debug) -rqg_dataload.initiate_rqg('examples', 'test', WORKDIR + '/node1/mysql.sock') +ssl_run.sysbench_run('sbtest') +ssl_run.data_load('pxc_dataload_db') +rqg_dataload = rqg_datagen.RQGDataGen(ssl_run.node1, debug) +rqg_dataload.initiate_rqg('examples', 'test', workdir) if int(version) < int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, - NODE, WORKDIR + '/node1/mysql.sock', debug) - checksum.sanity_check() + checksum = table_checksum.TableChecksum(ssl_run.node1, workdir, pt_basedir, debug) + checksum.sanity_check(ssl_run.pxc_nodes) checksum.data_consistency('test,pxc_dataload_db') else: - result = utility_cmd.check_table_count(BASEDIR, 'test', WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: test") - result = utility_cmd.check_table_count(BASEDIR, 'pxc_dataload_db', WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: pxc_dataload_db") + utility_cmd.test_table_count(ssl_run.node1, ssl_run.node2, 'sbtest') + utility_cmd.test_table_count(ssl_run.node1, ssl_run.node2, 'pxc_dataload_db') \ No newline at end of file diff --git a/suite/sysbench_run/sysbench_customized_dataload_test.py b/suite/sysbench_run/sysbench_customized_dataload_test.py index 4131510..e057496 100755 --- a/suite/sysbench_run/sysbench_customized_dataload_test.py +++ b/suite/sysbench_run/sysbench_customized_dataload_test.py @@ -1,69 +1,38 @@ #!/usr/bin/env python3 import os import sys -import argparse + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * +from base_test import * from util import sysbench_run from util import utility from util import table_checksum -# Read argument -parser = argparse.ArgumentParser(prog='PXC sysbench customized dataload test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - -class SysbenchLoadTest: - def start_server(self, socket, node): - if SERVER == "pxc": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) - elif SERVER == "ps": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) +class SysbenchLoadTest(BaseTest): + def __init__(self): + super().__init__(my_extra="--max-connections=1500 --innodb_buffer_pool_size=8G --innodb_log_file_size=1G") - def sysbench_run(self, socket, db): + def sysbench_run(self, node: DbConnection): # Sysbench load test - threads = [32, 64, 128] - version = utility_cmd.version_check(BASEDIR) if int(version) < 
int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) - checksum.sanity_check() - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench.sysbench_custom_table(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_CUSTOMIZED_DATALOAD_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load") + checksum = table_checksum.TableChecksum(node, workdir, pt_basedir, debug) + checksum.sanity_check(self.pxc_nodes) + sysbench = sysbench_run.SysbenchRun(node, debug) + sysbench.test_sanity_check(db) + sysbench.test_sysbench_custom_table(db) -print("----------------------------------------") -print("\nPXC sysbench customized data load test") -print("----------------------------------------") +utility.test_header("PXC sysbench customized data load test") sysbench_loadtest = SysbenchLoadTest() -if SERVER == "pxc": - sysbench_loadtest.start_server(WORKDIR + '/node1/mysql.sock', NODE) - sysbench_loadtest.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) -elif SERVER == "ps": - sysbench_loadtest.start_server(PS1_SOCKET, 1) - sysbench_loadtest.sysbench_run(PS1_SOCKET, 'test') - utility_cmd.stop_ps(WORKDIR, BASEDIR, 1) +if server == "pxc": + sysbench_loadtest.start_pxc() + sysbench_loadtest.sysbench_run(sysbench_loadtest.node1) + sysbench_loadtest.shutdown_nodes() +elif server == "ps": + sysbench_loadtest.set_number_of_nodes(1) + sysbench_loadtest.start_ps() + sysbench_loadtest.sysbench_run(sysbench_loadtest.ps_nodes[0]) + sysbench_loadtest.shutdown_nodes(sysbench_loadtest.ps_nodes) diff --git a/suite/sysbench_run/sysbench_oltp_test.py b/suite/sysbench_run/sysbench_oltp_test.py index ac43a10..f0fde22 100755 --- a/suite/sysbench_run/sysbench_oltp_test.py +++ b/suite/sysbench_run/sysbench_oltp_test.py @@ -1,76 +1,45 @@ #!/usr/bin/env python3 import os import sys -import argparse -import time + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection +from base_test import * from util import sysbench_run from util import utility from util import table_checksum -# Read argument -parser = argparse.ArgumentParser(prog='PXC sysbench oltp test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - -class SysbenchOLTPTest: - def start_server(self, socket, node): - if SERVER == "pxc": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) - elif SERVER == "ps": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) +class SysbenchOLTPTest(BaseTest): + def __init__(self): + super().__init__(my_extra="--max-connections=1500 
--innodb_buffer_pool_size=8G --innodb_log_file_size=1G") - def sysbench_run(self, socket, db): + def sysbench_run(self, nodes: list[DbConnection]): # Sysbench OLTP Test - threads = [32, 64, 128] - version = utility_cmd.version_check(BASEDIR) - checksum = "" + threads = [32, 128] #64 if int(version) < int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) - checksum.sanity_check() - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) + checksum = table_checksum.TableChecksum(nodes[0], workdir, pt_basedir, debug) + checksum.sanity_check(nodes) + sysbench = sysbench_run.SysbenchRun(nodes[0], debug) for thread in threads: - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - sysbench.sysbench_custom_oltp_load(db, 5, thread, SYSBENCH_OLTP_TEST_TABLE_SIZE) + sysbench.test_sanity_check(db) + sysbench.sysbench_custom_oltp_load(db, 5, thread) time.sleep(5) - result = utility_cmd.check_table_count(BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: " + db) + if len(nodes) > 1: + utility_cmd.test_table_count(nodes[0], nodes[1], db) -print("------------------------") -print("\nPXC sysbench oltp test") -print("------------------------") -sysbench_loadtest = SysbenchOLTPTest() -if SERVER == "pxc": - sysbench_loadtest.start_server(WORKDIR + '/node1/mysql.sock', NODE) - sysbench_loadtest.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) -elif SERVER == "ps": - sysbench_loadtest.start_server(PS1_SOCKET, 1) - sysbench_loadtest.sysbench_run(PS1_SOCKET, 'test') - utility_cmd.stop_ps(WORKDIR, BASEDIR, 1) +db = 'test' +utility.test_header("PXC sysbench oltp test") +sysbench_loadtest = SysbenchOLTPTest() +if server == "pxc": + sysbench_loadtest.start_pxc() + sysbench_loadtest.sysbench_run(sysbench_loadtest.pxc_nodes) + sysbench_loadtest.shutdown_nodes() +elif server == "ps": + sysbench_loadtest.set_number_of_nodes(1) + sysbench_loadtest.start_ps() + sysbench_loadtest.sysbench_run(sysbench_loadtest.ps_nodes) + sysbench_loadtest.shutdown_nodes(sysbench_loadtest.ps_nodes) diff --git a/suite/sysbench_run/sysbench_read_only_test.py b/suite/sysbench_run/sysbench_read_only_test.py index 7989e0d..9b686e4 100755 --- a/suite/sysbench_run/sysbench_read_only_test.py +++ b/suite/sysbench_run/sysbench_read_only_test.py @@ -1,72 +1,44 @@ #!/usr/bin/env python3 import os import sys -import argparse -import time + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * +from base_test import * from util import sysbench_run from util import utility from util import table_checksum -# Read argument -parser = argparse.ArgumentParser(prog='PXC sysbench read only test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() +class SysbenchReadOnlyTest(BaseTest): + def __init__(self): + super().__init__(my_extra="--max-connections=1500 --innodb_buffer_pool_size=8G 
--innodb_log_file_size=1G") -class SysbenchReadOnlyTest: - def start_server(self, socket, node): - if SERVER == "pxc": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_pxc(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) - elif SERVER == "ps": - my_extra = "--innodb_buffer_pool_size=8G --innodb_log_file_size=1G" - utility_cmd.start_ps(parent_dir, WORKDIR, BASEDIR, node, socket, USER, encryption, my_extra) - - def sysbench_run(self, socket, db): + def sysbench_run(self, nodes: list[DbConnection]): # Sysbench load test - threads = [32, 64, 128] - version = utility_cmd.version_check(BASEDIR) + threads = [32, 128] # 64 if int(version) < int("080000"): - checksum = table_checksum.TableChecksum(PT_BASEDIR, BASEDIR, WORKDIR, NODE, socket, debug) - checksum.sanity_check() - sysbench = sysbench_run.SysbenchRun(BASEDIR, WORKDIR, - socket, debug) + checksum = table_checksum.TableChecksum(nodes[0], workdir, pt_basedir, debug) + checksum.sanity_check(nodes) + sysbench = sysbench_run.SysbenchRun(nodes[0], debug) for thread in threads: - result = sysbench.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - sysbench.sysbench_custom_read_qa(db, 5, thread, SYSBENCH_READ_QA_TABLE_SIZE) + sysbench.test_sanity_check(db) + sysbench.sysbench_custom_read_qa(db, 5, thread) time.sleep(5) - result = utility_cmd.check_table_count(BASEDIR, db, socket, WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: " + db) + if len(nodes) > 1: + utility_cmd.test_table_count(nodes[0], nodes[1], db) -print("-----------------------------") -print("\nPXC sysbench read only test") -print("-----------------------------") +utility.test_header("PXC sysbench read only test") sysbench_loadtest = SysbenchReadOnlyTest() -if SERVER == "pxc": - sysbench_loadtest.start_server(WORKDIR + '/node1/mysql.sock', NODE) - sysbench_loadtest.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) -elif SERVER == "ps": - sysbench_loadtest.start_server(PS1_SOCKET, 1) - sysbench_loadtest.sysbench_run(PS1_SOCKET, 'test') - utility_cmd.stop_pxc(WORKDIR, BASEDIR, 1) + +if server == "pxc": + sysbench_loadtest.start_pxc() + sysbench_loadtest.sysbench_run(sysbench_loadtest.pxc_nodes) + sysbench_loadtest.shutdown_nodes() +elif server == "ps": + sysbench_loadtest.set_number_of_nodes(1) + sysbench_loadtest.start_ps() + sysbench_loadtest.sysbench_run(sysbench_loadtest.ps_nodes) + sysbench_loadtest.shutdown_nodes(sysbench_loadtest.ps_nodes) diff --git a/suite/upgrade/pxc_replication_upgrade.py b/suite/upgrade/pxc_replication_upgrade.py old mode 100644 new mode 100755 index 4e8b56f..4fe0a99 --- a/suite/upgrade/pxc_replication_upgrade.py +++ b/suite/upgrade/pxc_replication_upgrade.py @@ -1,151 +1,55 @@ #!/usr/bin/env python3 import os import sys -import argparse -import subprocess -import time -from datetime import datetime + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * -from util import pxc_startup -from util import ps_startup -from util import db_connection from util import sysbench_run from util import utility from util import rqg_datagen -# Read argument -parser = argparse.ArgumentParser(prog='PXC upgrade test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption 
options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() +def get_rpl_conf(rpl_type): + if rpl_type == utility.RplType.GTID_LESS: + return parent_dir + '/suite/replication/replication.cnf' + else: + return parent_dir + '/suite/replication/gtid_replication.cnf' -class PXCUpgrade: - def startup(self, replication_conf): - # Start PXC cluster for upgrade test - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, PXC_LOWER_BASE, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.add_myextra_configuration(parent_dir + '/suite/replication/' + replication_conf) - utility_cmd.check_testcase(result, "PXC: Adding custom configuration") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - def start_ps(self, node, replication_conf, my_extra=None): - """ Start Percona Server. 
This method will - perform sanity checks for PS startup - :param my_extra: We can pass extra PS startup - option with this parameter - """ - if my_extra is None: - my_extra = '' - # Start PXC cluster for replication test - dbconnection_check = db_connection.DbConnection(USER, PS1_SOCKET) - server_startup = ps_startup.StartPerconaServer(parent_dir, WORKDIR, PXC_LOWER_BASE, int(node), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "PS: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "PS: Configuration file creation") - else: - result = server_startup.create_config() - utility_cmd.check_testcase(result, "PS: Configuration file creation") - result = server_startup.add_myextra_configuration(parent_dir + '/suite/replication/' + replication_conf) - utility_cmd.check_testcase(result, "PS: Adding custom configuration") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "PS: Initializing cluster") - result = server_startup.start_server(my_extra) - utility_cmd.check_testcase(result, "PS: Server startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "PS: Database connection") +class PXCUpgrade(BaseTest): + def __init__(self): + super().__init__(vers=Version.LOWER) - def sysbench_run(self, node1_socket, db, upgrade_type): + def sysbench_run(self, db, upgrade_type: str): # Sysbench dataload for consistency test - sysbench_node1 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - node1_socket, debug) + sysbench_ps_node1 = sysbench_run.SysbenchRun(self.ps_nodes[0], debug) - result = sysbench_node1.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench_node1.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load") - version = utility_cmd.version_check(PXC_LOWER_BASE) - if int(version) > int("050700"): + sysbench_ps_node1.test_sanity_check(db) + sysbench_ps_node1.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) + if int(low_version_num) > int("050700"): if encryption == 'YES': for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - encrypt_table = PXC_LOWER_BASE + '/bin/mysql --user=root ' \ - '--socket=' + WORKDIR + '/node1/mysql.sock -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) - sysbench_node2 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node2/mysql.sock', debug) - sysbench_node3 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node3/mysql.sock', debug) + self.ps_nodes[0].execute('alter table ' + db + '.sbtest' + str(i) + " encryption='Y'") + sysbench_node1 = sysbench_run.SysbenchRun(self.node1, debug) + sysbench_node2 = sysbench_run.SysbenchRun(self.node2, debug) + sysbench_node3 = sysbench_run.SysbenchRun(self.node3, debug) if upgrade_type == 'readwrite': - result = sysbench_node1.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node1") - result = sysbench_node2.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run 
on node2") - result = sysbench_node3.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node3") + sysbench_node1.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node2.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node3.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) elif upgrade_type == 'readonly': - result = sysbench_node1.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node1") - result = sysbench_node2.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node2") - result = sysbench_node3.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node3") - - def startup_check(self, cluster_node): - """ This method will check the node - startup status. - """ - ping_query = PXC_LOWER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(120): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - utility_cmd.check_testcase(int(ping_status), "Node startup is successful") - break # break the loop if mysqld is running + sysbench_node1.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node2.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node3.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) def rolling_upgrade(self, upgrade_type): """ This function will upgrade @@ -153,141 +57,56 @@ def rolling_upgrade(self, upgrade_type): latest version and perform table checksum. 
""" - self.sysbench_run('/tmp/psnode1.sock', 'sbtest', upgrade_type) + self.sysbench_run('sbtest', upgrade_type) time.sleep(10) - for i in range(int(NODE), 0, -1): - query = "ps -ef | grep sysbench | grep -v gep | grep node" + \ - str(i) + " | awk '{print $2}'" - sysbench_pid = os.popen(query).read().rstrip() - kill_sysbench = "kill -9 " + sysbench_pid + " > /dev/null 2>&1" - if debug == 'YES': - print("Terminating sysbench run : " + kill_sysbench) - os.system(kill_sysbench) - shutdown_node = PXC_LOWER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock shutdown > /dev/null 2>&1' - if debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node" + str(i) + " for upgrade testing") - version = utility_cmd.version_check(PXC_UPPER_BASE) - if int(version) > int("080000"): - os.system("sed -i '/wsrep_sst_auth=root:/d' " + WORKDIR + '/conf/node' + str(i) + '.cnf') - startup_cmd = PXC_UPPER_BASE + '/bin/mysqld --defaults-file=' + \ - WORKDIR + '/conf/node' + str(i) + '.cnf --wsrep-provider=' + \ - PXC_UPPER_BASE + '/lib/libgalera_smm.so --datadir=' + \ - WORKDIR + '/node' + str(i) + ' --basedir=' + PXC_UPPER_BASE + ' --log-error=' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err >> ' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err 2>&1 &' - utility_cmd.check_testcase(0, "Starting cluster node" + str(i) + " with upgraded version") - else: - startup_cmd = PXC_UPPER_BASE + '/bin/mysqld --defaults-file=' + \ - WORKDIR + '/conf/node' + str(i) + '.cnf --datadir=' + \ - WORKDIR + '/node' + str(i) + ' --basedir=' + PXC_UPPER_BASE + \ - ' --wsrep-provider=none --log-error=' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err >> ' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err 2>&1 &' - if debug == 'YES': - print(startup_cmd) - os.system(startup_cmd) - self.startup_check(i) - if int(version) < int("080000"): - upgrade_cmd = PXC_UPPER_BASE + '/bin/mysql_upgrade -uroot --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock > ' + WORKDIR + '/log/node' + str(i) + '_upgrade.log 2>&1' - if debug == 'YES': - print(upgrade_cmd) - result = os.system(upgrade_cmd) - utility_cmd.check_testcase(result, "Cluster node" + str(i) + " upgrade is successful") - shutdown_node = PXC_UPPER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock shutdown > /dev/null 2>&1' - if debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node" + str(i) + " after upgrade run") - create_startup = 'sed "s#' + PXC_LOWER_BASE + '#' + PXC_UPPER_BASE + \ - '#g" ' + WORKDIR + '/log/startup' + str(i) + '.sh > ' + \ - WORKDIR + '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(create_startup) - os.system(create_startup) - if i == 1: - remove_bootstrap_option = 'sed -i "s#--wsrep-new-cluster##g" ' + \ - WORKDIR + '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(remove_bootstrap_option) - os.system(remove_bootstrap_option) - time.sleep(5) - - upgrade_startup = "bash " + WORKDIR + \ - '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(upgrade_startup) - result = os.system(upgrade_startup) - utility_cmd.check_testcase(result, "Starting cluster node" + str(i) + " after upgrade run") - self.startup_check(i) + for node in [self.node3, self.node2, self.node1]: + sysbench_pid = utility.sysbech_node_pid(node.get_node_number()) + 
utility_cmd.kill_process(sysbench_pid, "sysbench", True) + pxc_startup.StartCluster.upgrade_pxc_node(node, debug) time.sleep(10) - utility_cmd.replication_io_status(BASEDIR, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') - utility_cmd.replication_sql_status(BASEDIR, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') - sysbench_node = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node1/mysql.sock', debug) - result = sysbench_node.sysbench_oltp_read_write('sbtest', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 100) - utility_cmd.check_testcase(result, "Sysbench oltp run after upgrade") + utility_cmd.replication_io_status(self.node3, version) + utility_cmd.replication_sql_status(self.node3, version) + sysbench_node = sysbench_run.SysbenchRun(self.node1, debug) + sysbench_node.test_sysbench_oltp_read_write('sbtest', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 100) time.sleep(15) - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'sbtest', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: sbtest") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_galera', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_galera") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_transactions', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_transactions") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_partitioning', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_partitioning") + utility_cmd.test_table_count(self.node1, self.node2, 'sbtest') + utility_cmd.test_table_count(self.node1, self.node2, 'db_galera') + utility_cmd.test_table_count(self.node1, self.node2, 'db_transactions') + utility_cmd.test_table_count(self.node1, self.node2, 'db_partitioning') - utility_cmd.stop_pxc(WORKDIR, PXC_UPPER_BASE, NODE) - utility_cmd.stop_ps(WORKDIR, PXC_LOWER_BASE, 1) + self.shutdown_nodes() + self.shutdown_nodes(self.ps_nodes) -query = PXC_LOWER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -lower_version = os.popen(query).read().rstrip() -query = PXC_UPPER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -upper_version = os.popen(query).read().rstrip() -version = utility_cmd.version_check(PXC_UPPER_BASE) -print('------------------------------------------------------------------------------------') -print("\nPXC Asyc non-gtid replication upgrade test : Upgrading from PXC-" + lower_version + - " to PXC-" + upper_version) -print('------------------------------------------------------------------------------------') +utility.test_header("PXC Asyc non-gtid replication upgrade test : Upgrading from PXC-" + lower_version + + " to PXC-" + upper_version) upgrade_qa = PXCUpgrade() -upgrade_qa.startup('replication.cnf') -upgrade_qa.start_ps('1', 'replication.cnf') -utility_cmd.invoke_replication(PXC_LOWER_BASE, '/tmp/psnode1.sock', - WORKDIR + '/node3/mysql.sock', 'NONGTID', 'none') -utility_cmd.replication_io_status(PXC_LOWER_BASE, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') -utility_cmd.replication_sql_status(PXC_LOWER_BASE, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') -rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, 
WORKDIR, USER, debug) -rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock') +upgrade_qa.set_extra_conf_file(get_rpl_conf(utility.RplType.GTID_LESS)) +upgrade_qa.start_pxc() +saved_number_of_nodes = upgrade_qa.get_number_of_nodes() +upgrade_qa.set_number_of_nodes(1) +upgrade_qa.start_ps() +upgrade_qa.set_number_of_nodes(saved_number_of_nodes) +utility_cmd.invoke_replication(upgrade_qa.ps_nodes[0], upgrade_qa.node3, utility.RplType.GTID_LESS) +utility_cmd.replication_io_status(upgrade_qa.node3, low_version_num) +utility_cmd.replication_sql_status(upgrade_qa.node3, low_version_num) +rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug) +rqg_dataload.pxc_dataload(workdir) upgrade_qa.rolling_upgrade('none') -print('------------------------------------------------------------------------------------') -print("\nPXC Asyc gtid replication upgrade test : Upgrading from PXC-" + lower_version + - " to PXC-" + upper_version) -print('------------------------------------------------------------------------------------') -upgrade_qa.startup('gtid_replication.cnf') -upgrade_qa.start_ps('1', 'gtid_replication.cnf') -utility_cmd.invoke_replication(PXC_LOWER_BASE, '/tmp/psnode1.sock', - WORKDIR + '/node3/mysql.sock', 'GTID', 'none') -utility_cmd.replication_io_status(PXC_LOWER_BASE, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') -utility_cmd.replication_sql_status(PXC_LOWER_BASE, WORKDIR + '/node3/mysql.sock', 'PXC slave', 'none') -rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug) -rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock') +utility.test_header("PXC Asyc gtid replication upgrade test : Upgrading from PXC-" + lower_version + + " to PXC-" + upper_version) +upgrade_qa.set_extra_conf_file(get_rpl_conf(utility.RplType.GTID)) +upgrade_qa.start_pxc() +saved_number_of_nodes = upgrade_qa.get_number_of_nodes() +upgrade_qa.set_number_of_nodes(1) +upgrade_qa.start_ps() +upgrade_qa.set_number_of_nodes(saved_number_of_nodes) +utility_cmd.invoke_replication(upgrade_qa.ps_nodes[0], upgrade_qa.node3, utility.RplType.GTID) +utility_cmd.replication_io_status(upgrade_qa.node3, low_version_num) +utility_cmd.replication_sql_status(upgrade_qa.node3, low_version_num) +rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug) +rqg_dataload.pxc_dataload(workdir) upgrade_qa.rolling_upgrade('none') diff --git a/suite/upgrade/pxc_upgrade.py b/suite/upgrade/pxc_upgrade.py index e87afe4..4a83f5b 100755 --- a/suite/upgrade/pxc_upgrade.py +++ b/suite/upgrade/pxc_upgrade.py @@ -1,190 +1,52 @@ #!/usr/bin/env python3 import os import sys -import argparse -import subprocess -import time -import shutil -from datetime import datetime + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) +from base_test import * from config import * -from util import pxc_startup -from util import db_connection from util import sysbench_run from util import utility from util import rqg_datagen -# Read argument -parser = argparse.ArgumentParser(prog='PXC upgrade test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = 
utility.Utility(debug) -utility_cmd.check_python_version() +class PXCUpgrade(BaseTest): + def __init__(self): + super().__init__(vers=Version.LOWER) -class PXCUpgrade: - def startup(self, wsrep_extra=None): + def join_higher_version_node(self): # Start PXC cluster for upgrade test - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, PXC_LOWER_BASE, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - if wsrep_extra is not None: - result = server_startup.create_config('encryption', - 'gcache.keep_pages_size=5;' - 'gcache.page_size=1024M;gcache.size=1024M;') - else: - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: - if wsrep_extra is not None: - result = server_startup.create_config('none', 'gcache.keep_pages_size=5;' - 'gcache.page_size=1024M;gcache.size=1024M;') - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") + self.pxc_nodes.append(pxc_startup.StartCluster.join_new_upgraded_node(self.node3, 4, debug)) - def startup_check(self, cluster_node): - """ This method will check the node - startup status. - """ - ping_query = PXC_LOWER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(300): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - version = utility_cmd.version_check(PXC_UPPER_BASE) - if int(version) > int("080000"): - wsrep_status = "" - while wsrep_status != "Synced": - status_query = BASEDIR + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock -Bse"show status like ' \ - "'wsrep_local_state_comment'\" 2>&1 | awk \'{print $2}\'" - wsrep_status = os.popen(status_query).read().rstrip() - utility_cmd.check_testcase(int(ping_status), "Node startup is successful" - "(Node status:" + wsrep_status + ")") - break # break the loop if mysqld is running - if startup_timer > 298: - utility_cmd.check_testcase(0, "ERROR! Node is not synced with cluster. 
" - "Check the error log to get more info") - exit(1) - - def start_upper_version(self): - # Start PXC cluster for upgrade test - shutil.copy(WORKDIR + '/conf/node3.cnf', - WORKDIR + '/conf/node4.cnf') - query = PXC_LOWER_BASE + '/bin/mysql --user=root --socket=' + WORKDIR + \ - '/node3/mysql.sock -Bse"show variables like \'wsrep_cluster_address\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - wsrep_cluster_addr = os.popen(query).read().rstrip() - query = PXC_LOWER_BASE + "/bin/mysql --user=root --socket=" + \ - WORKDIR + '/node3/mysql.sock -Bse"select @@port" 2>&1' - port_no = os.popen(query).read().rstrip() - wsrep_port_no = int(port_no) + 108 - port_no = int(port_no) + 100 - os.system("sed -i 's#node3#node4#g' " + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '/wsrep_sst_auth=root:/d' " + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*wsrep_cluster_address[ \\t]*=.*$/s|" - "^[ \\t]*wsrep_cluster_address[ \\t]*=.*$|wsrep_cluster_address=" - + wsrep_cluster_addr + "127.0.0.1:" + str(wsrep_port_no) + "|' " - + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*port[ \\t]*=.*$/s|" - "^[ \\t]*port[ \\t]*=.*$|port=" - + str(port_no) + "|' " + WORKDIR + '/conf/node4.cnf') - os.system('sed -i "0,/^[ \\t]*wsrep_provider_options[ \\t]*=.*$/s|' - "^[ \\t]*wsrep_provider_options[ \\t]*=.*$|wsrep_provider_options=" - "'gmcast.listen_addr=tcp://127.0.0.1:" + str(wsrep_port_no) + "'" - '|" ' + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*server_id[ \\t]*=.*$/s|" - "^[ \\t]*server_id[ \\t]*=.*$|server_id=" - "14|' " + WORKDIR + '/conf/node4.cnf') - create_startup = 'sed "s#' + PXC_LOWER_BASE + '#' + PXC_UPPER_BASE + \ - '#g" ' + WORKDIR + '/log/startup3.sh > ' + \ - WORKDIR + '/log/startup4.sh' - if debug == 'YES': - print(create_startup) - os.system(create_startup) - os.system("sed -i 's#node3#node4#g' " + WORKDIR + '/log/startup4.sh') - os.system("rm -rf " + WORKDIR + '/node4') - os.mkdir(WORKDIR + '/node4') - upgrade_startup = "bash " + WORKDIR + \ - '/log/startup4.sh' - if debug == 'YES': - print(upgrade_startup) - result = os.system(upgrade_startup) - utility_cmd.check_testcase(result, "Starting PXC-8.0 cluster node4 for upgrade testing") - self.startup_check(4) - - def sysbench_run(self, node1_socket, db, upgrade_type): + def sysbench_run(self, upgrade_type): # Sysbench dataload for consistency test - sysbench_node1 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - node1_socket, debug) + sysbench_node1 = sysbench_run.SysbenchRun(self.node1, debug) - result = sysbench_node1.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - result = sysbench_node1.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load") - version = utility_cmd.version_check(PXC_LOWER_BASE) - if int(version) > int("050700"): + sysbench_node1.test_sanity_check(db) + sysbench_node1.test_sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) + if int(low_version_num) > int("050700"): if encryption == 'YES': for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - encrypt_table = PXC_LOWER_BASE + '/bin/mysql --user=root ' \ - '--socket=' + WORKDIR + '/node1/mysql.sock -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) - sysbench_node2 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node2/mysql.sock', debug) - 
sysbench_node3 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node3/mysql.sock', debug) + self.node1.execute('alter table ' + db + '.sbtest' + str(i) + " encryption='Y'") + + sysbench_node2 = sysbench_run.SysbenchRun(self.node2, debug) + sysbench_node3 = sysbench_run.SysbenchRun(self.node3, debug) if upgrade_type == 'readwrite' or upgrade_type == 'readwrite_sst': - result = sysbench_node1.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node1") - result = sysbench_node2.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node2") - result = sysbench_node3.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node3") + sysbench_node1.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node2.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node3.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) elif upgrade_type == 'readonly': - result = sysbench_node1.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node1") - result = sysbench_node2.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node2") - result = sysbench_node3.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node3") + sysbench_node1.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node2.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) + sysbench_node3.test_sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 1000, True) def rolling_upgrade(self, upgrade_type): """ This function will upgrade @@ -192,180 +54,72 @@ def rolling_upgrade(self, upgrade_type): latest version and perform table checksum. 
""" - self.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test', upgrade_type) + self.sysbench_run(upgrade_type) time.sleep(5) - for i in range(int(NODE), 0, -1): - query = "ps -ef | grep sysbench | grep -v gep | grep node" + \ - str(i) + " | awk '{print $2}'" - sysbench_pid = os.popen(query).read().rstrip() - kill_sysbench = "kill -9 " + sysbench_pid + " > /dev/null 2>&1" - if debug == 'YES': - print("Terminating sysbench run : " + kill_sysbench) - os.system(kill_sysbench) - shutdown_node = PXC_LOWER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock shutdown > /dev/null 2>&1' - if debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node" + str(i) + " for upgrade testing") - if i == 3: - if upgrade_type == 'readwrite_sst': - sysbench_node1 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, WORKDIR + - '/node1/mysql.sock', debug) - sysbench_node1.sanity_check('test_one') - sysbench_node1.sanity_check('test_two') - sysbench_node1.sanity_check('test_three') - result = sysbench_node1.sysbench_load('test_one', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load(DB: test_one)") - result = sysbench_node1.sysbench_load('test_two', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load(DB: test_two)") - result = sysbench_node1.sysbench_load('test_three', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_LOAD_TEST_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load(DB: test_three)") - - version = utility_cmd.version_check(PXC_UPPER_BASE) - if int(version) > int("080000"): - os.system("sed -i '/wsrep_sst_auth=root:/d' " + WORKDIR + '/conf/node' + str(i) + '.cnf') - os.system("sed -i 's#wsrep_slave_threads=8#wsrep_slave_threads=30#g' " + WORKDIR + - '/conf/node' + str(i) + '.cnf') - startup_cmd = PXC_UPPER_BASE + '/bin/mysqld --defaults-file=' + \ - WORKDIR + '/conf/node' + str(i) + '.cnf --datadir=' + \ - WORKDIR + '/node' + str(i) + ' --basedir=' + PXC_UPPER_BASE + \ - ' --wsrep-provider=' + PXC_UPPER_BASE + \ - '/lib/libgalera_smm.so --log-error=' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err >> ' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err 2>&1 &' + for node in [self.node3, self.node2, self.node1]: + sysbench_pid = utility.sysbech_node_pid(node.get_node_number()) + utility_cmd.kill_process(sysbench_pid, "sysbench run", True) + if node == self.node3 and upgrade_type == 'readwrite_sst': + node_to_add_load = self.node1 else: - startup_cmd = PXC_UPPER_BASE + '/bin/mysqld --defaults-file=' + \ - WORKDIR + '/conf/node' + str(i) + '.cnf --datadir=' + \ - WORKDIR + '/node' + str(i) + ' --basedir=' + PXC_UPPER_BASE + \ - ' --wsrep-provider=none --log-error=' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err >> ' + \ - WORKDIR + '/log/upgrade_node' + str(i) + '.err 2>&1 &' - if debug == 'YES': - print(startup_cmd) - os.system(startup_cmd) - self.startup_check(i) - if int(version) < int("080000"): - upgrade_cmd = PXC_UPPER_BASE + '/bin/mysql_upgrade -uroot --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock > ' + WORKDIR + '/log/node' + str(i) + '_upgrade.log 2>&1' - if debug == 'YES': - print(upgrade_cmd) - result = os.system(upgrade_cmd) - utility_cmd.check_testcase(result, "Cluster node" + str(i) + " upgrade is successful") - shutdown_node = PXC_UPPER_BASE + '/bin/mysqladmin 
--user=root --socket=' + \ - WORKDIR + '/node' + str(i) + \ - '/mysql.sock shutdown > /dev/null 2>&1' - if debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - utility_cmd.check_testcase(result, "Shutdown cluster node" + str(i) + " after upgrade run") - create_startup = 'sed "s#' + PXC_LOWER_BASE + '#' + PXC_UPPER_BASE + \ - '#g" ' + WORKDIR + '/log/startup' + str(i) + '.sh > ' + \ - WORKDIR + '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(create_startup) - os.system(create_startup) - if i == 1: - remove_bootstrap_option = 'sed -i "s#--wsrep-new-cluster##g" ' + \ - WORKDIR + '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(remove_bootstrap_option) - os.system(remove_bootstrap_option) - time.sleep(5) - - upgrade_startup = "bash " + WORKDIR + \ - '/log/upgrade_startup' + str(i) + '.sh' - if debug == 'YES': - print(upgrade_startup) - result = os.system(upgrade_startup) - utility_cmd.check_testcase(result, "Starting cluster node" + str(i) + " after upgrade run") - self.startup_check(i) - time.sleep(10) - sysbench_node = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node1/mysql.sock', debug) - result = sysbench_node.sysbench_oltp_read_write('test', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 100) - utility_cmd.check_testcase(result, "Sysbench oltp run after upgrade") + node_to_add_load = None + cnf_replace = {"wsrep_slave_threads": "30"} + if 'readwrite' in upgrade_type: + pxc_startup.StartCluster.upgrade_pxc_node(node, debug, node_to_add_load, cnf_replace, 600) + else: + pxc_startup.StartCluster.upgrade_pxc_node(node, debug, node_to_add_load, cnf_replace) + time.sleep(60) + sysbench_node = sysbench_run.SysbenchRun(self.node1, debug) + sysbench_node.test_sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_NORMAL_TABLE_SIZE, 100) time.sleep(5) - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'test', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: test") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_galera', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_galera") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_transactions', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_transactions") - result = utility_cmd.check_table_count(PXC_UPPER_BASE, 'db_partitioning', - WORKDIR + '/node1/mysql.sock', - WORKDIR + '/node2/mysql.sock') - utility_cmd.check_testcase(result, "Checksum run for DB: db_partitioning") - utility_cmd.stop_pxc(WORKDIR, PXC_UPPER_BASE, NODE) + utility_cmd.test_table_count(self.node1, self.node2, 'test') + utility_cmd.test_table_count(self.node1, self.node2, 'db_galera') + utility_cmd.test_table_count(self.node1, self.node2, 'db_transactions') + utility_cmd.test_table_count(self.node1, self.node2, 'db_partitioning') + self.shutdown_nodes() + +utility.test_header("PXC Upgrade test : Upgrading from PXC-" + lower_version + " to PXC-" + upper_version) -query = PXC_LOWER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -lower_version = os.popen(query).read().rstrip() -query = PXC_UPPER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -upper_version = os.popen(query).read().rstrip() -version = 
utility_cmd.version_check(PXC_UPPER_BASE)
-print('------------------------------------------------------------------------------------')
-print("\nPXC Upgrade test : Upgrading from PXC-" + lower_version + " to PXC-" + upper_version)
-print('------------------------------------------------------------------------------------')
-print(datetime.now().strftime("%H:%M:%S ") + " Rolling upgrade without active workload")
-print('------------------------------------------------------------------------------------')
+utility.test_scenario_header("Rolling upgrade without active workload")
 upgrade_qa = PXCUpgrade()
-upgrade_qa.startup()
-rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug)
-rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock')
+upgrade_qa.start_pxc()
+rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug)
+rqg_dataload.pxc_dataload(workdir)
 upgrade_qa.rolling_upgrade('none')
-print('------------------------------------------------------------------------------------')
-print(datetime.now().strftime("%H:%M:%S ") + " Rolling upgrade with active readonly workload")
-print('------------------------------------------------------------------------------------')
-upgrade_qa.startup()
-rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug)
-rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock')
+
+utility.test_scenario_header("Rolling upgrade with active readonly workload")
+upgrade_qa.start_pxc()
+rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug)
+rqg_dataload.pxc_dataload(workdir)
 upgrade_qa.rolling_upgrade('readonly')
-print('------------------------------------------------------------------------------------')
-print(datetime.now().strftime("%H:%M:%S ") + " Rolling upgrade with active read/write workload"
-      "(enforcing SST on node-join)")
-print('------------------------------------------------------------------------------------')
-upgrade_qa.startup()
-rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug)
-rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock')
+
+utility.test_scenario_header("Rolling upgrade with active read/write workload (enforcing SST on node-join)")
+upgrade_qa.start_pxc()
+rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug)
+rqg_dataload.pxc_dataload(workdir)
 upgrade_qa.rolling_upgrade('readwrite_sst')
-print('------------------------------------------------------------------------------------')
-print(datetime.now().strftime("%H:%M:%S ") + " Rolling upgrade with active read/write workload"
-      "(enforcing IST on node-join)")
-print('------------------------------------------------------------------------------------')
-upgrade_qa.startup('wsrep_extra')
-rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug)
-rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock')
+
+utility.test_scenario_header("Rolling upgrade with active read/write workload (enforcing IST on node-join)")
+upgrade_qa.set_wsrep_provider_options('gcache.keep_pages_size=5;gcache.page_size=1024M;gcache.size=1024M;')
+upgrade_qa.start_pxc()
+rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug)
+rqg_dataload.pxc_dataload(workdir)
 upgrade_qa.rolling_upgrade('readwrite')
 if int(version) > int("080000"):
-    print('------------------------------------------------------------------------------------')
-    print(datetime.now().strftime("%H:%M:%S ") + "Mix of PXC-" +
-          lower_version + " and PXC-" + upper_version + "(without active workload)")
-
print('------------------------------------------------------------------------------------') + utility.test_scenario_header("Mix of PXC-" + lower_version + " and PXC-" + upper_version + + " (without active workload)") upgrade_qa = PXCUpgrade() - upgrade_qa.startup() - upgrade_qa.start_upper_version() - print('------------------------------------------------------------------------------------') - print(datetime.now().strftime("%H:%M:%S ") + "Mix of PXC-" + - lower_version + " and PXC-" + upper_version + "(with active read/write workload)") - print('------------------------------------------------------------------------------------') - upgrade_qa.startup('wsrep_extra') - rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug) - rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock') - upgrade_qa.sysbench_run(WORKDIR + '/node1/mysql.sock', 'test', 'readwrite') - upgrade_qa.start_upper_version() - -utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) + upgrade_qa.start_pxc() + upgrade_qa.join_higher_version_node() + utility.test_scenario_header("Mix of PXC-" + lower_version + " and PXC-" + upper_version + + " (with active read/write workload)") + upgrade_qa.set_wsrep_provider_options('gcache.keep_pages_size=5;gcache.page_size=1024M;gcache.size=1024M;') + upgrade_qa.start_pxc() + rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug) + rqg_dataload.pxc_dataload(workdir) + upgrade_qa.sysbench_run('readwrite') + upgrade_qa.join_higher_version_node() + upgrade_qa.shutdown_nodes() diff --git a/suite/upgrade/pxc_upgrade_replacement.py b/suite/upgrade/pxc_upgrade_replacement.py old mode 100644 new mode 100755 index 830f017..2c6bd8c --- a/suite/upgrade/pxc_upgrade_replacement.py +++ b/suite/upgrade/pxc_upgrade_replacement.py @@ -1,291 +1,32 @@ #!/usr/bin/env python3 import os import sys -import argparse -import subprocess -import time -import shutil -from datetime import datetime + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../../')) sys.path.insert(0, parent_dir) -from config import * -from util import pxc_startup -from util import db_connection -from util import sysbench_run +from base_test import * from util import utility from util import rqg_datagen -# Read argument -parser = argparse.ArgumentParser(prog='PXC upgrade test', usage='%(prog)s [options]') -parser.add_argument('-e', '--encryption-run', action='store_true', - help='This option will enable encryption options') -parser.add_argument('-d', '--debug', action='store_true', - help='This option will enable debug logging') -args = parser.parse_args() -if args.encryption_run is True: - encryption = 'YES' -else: - encryption = 'NO' -if args.debug is True: - debug = 'YES' -else: - debug = 'NO' - -utility_cmd = utility.Utility(debug) -utility_cmd.check_python_version() - - -class PXCUpgrade: - def startup(self, wsrep_extra=None): - # Start PXC cluster for upgrade test - dbconnection_check = db_connection.DbConnection(USER, WORKDIR + '/node1/mysql.sock') - server_startup = pxc_startup.StartCluster(parent_dir, WORKDIR, PXC_LOWER_BASE, int(NODE), debug) - result = server_startup.sanity_check() - utility_cmd.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - if wsrep_extra is not None: - result = server_startup.create_config('encryption', - 'gcache.keep_pages_size=5;' - 'gcache.page_size=1024M;gcache.size=1024M;') - else: - result = server_startup.create_config('encryption') - utility_cmd.check_testcase(result, "Configuration file creation") - else: 
- if wsrep_extra is not None: - result = server_startup.create_config('none', 'gcache.keep_pages_size=5;' - 'gcache.page_size=1024M;gcache.size=1024M;') - else: - result = server_startup.create_config('none') - utility_cmd.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - utility_cmd.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster() - utility_cmd.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - utility_cmd.check_testcase(result, "Database connection") - def startup_check(self, cluster_node): - """ This method will check the node - startup status. - """ - # Check wsrep sync status - query_cluster_status = PXC_LOWER_BASE + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock -Bse"show status like \'wsrep_local_state_comment\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - # Get ping status - ping_query = PXC_LOWER_BASE + '/bin/mysqladmin --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock ping > /dev/null 2>&1' - # check server live status - Timeout 300 sec - for startup_timer in range(300): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - wsrep_status = "" - while wsrep_status != "Synced": - status_query = BASEDIR + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node' + str(cluster_node) + \ - '/mysql.sock -Bse"show status like ' \ - "'wsrep_local_state_comment'\" 2>&1 | awk \'{print $2}\'" - wsrep_status = os.popen(status_query).read().rstrip() - utility_cmd.check_testcase(int(ping_status), "Node startup is successful" - "(Node status:" + wsrep_status + ")") - break # break the loop if mysqld is running - if startup_timer > 298: - utility_cmd.check_testcase(0, "ERROR! Node is not synced with cluster. 
" - "Check the error log to get more info") - exit(1) - - def start_upper_version(self): - # Start PXC cluster for upgrade test - # Copy node3.cnf to node4.cnf - shutil.copy(WORKDIR + '/conf/node3.cnf', - WORKDIR + '/conf/node4.cnf') - # get cluster address - query = PXC_LOWER_BASE + '/bin/mysql --user=root --socket=' + WORKDIR + \ - '/node3/mysql.sock -Bse"show variables like \'wsrep_cluster_address\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - wsrep_cluster_addr = os.popen(query).read().rstrip() - # get node3 port - query = PXC_LOWER_BASE + "/bin/mysql --user=root --socket=" + \ - WORKDIR + '/node3/mysql.sock -Bse"select @@port" 2>&1' - port_no = os.popen(query).read().rstrip() - wsrep_port_no = int(port_no) + 108 # node4 cluster connection port - port_no = int(port_no) + 100 # node4 port - # Update node4.cnf for startup - os.system("sed -i 's#node3#node4#g' " + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '/wsrep_sst_auth=root:/d' " + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*wsrep_cluster_address[ \\t]*=.*$/s|" - "^[ \\t]*wsrep_cluster_address[ \\t]*=.*$|wsrep_cluster_address=" - + wsrep_cluster_addr + "127.0.0.1:" + str(wsrep_port_no) + "|' " - + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*port[ \\t]*=.*$/s|" - "^[ \\t]*port[ \\t]*=.*$|port=" - + str(port_no) + "|' " + WORKDIR + '/conf/node4.cnf') - os.system('sed -i "0,/^[ \\t]*wsrep_provider_options[ \\t]*=.*$/s|' - "^[ \\t]*wsrep_provider_options[ \\t]*=.*$|wsrep_provider_options=" - "'gmcast.listen_addr=tcp://127.0.0.1:" + str(wsrep_port_no) + "'" - '|" ' + WORKDIR + '/conf/node4.cnf') - os.system("sed -i '0,/^[ \\t]*server_id[ \\t]*=.*$/s|" - "^[ \\t]*server_id[ \\t]*=.*$|server_id=" - "14|' " + WORKDIR + '/conf/node4.cnf') - # Create startup script for node4 - create_startup = 'sed "s#' + PXC_LOWER_BASE + '#' + PXC_UPPER_BASE + \ - '#g" ' + WORKDIR + '/log/startup3.sh > ' + \ - WORKDIR + '/log/startup4.sh' - if debug == 'YES': - print(create_startup) - os.system(create_startup) - os.system("sed -i 's#node3#node4#g' " + WORKDIR + '/log/startup4.sh') - os.system("rm -rf " + WORKDIR + '/node4') - os.mkdir(WORKDIR + '/node4') - # start node4 - upgrade_startup = "bash " + WORKDIR + \ - '/log/startup4.sh' - if debug == 'YES': - print(upgrade_startup) - result = os.system(upgrade_startup) - utility_cmd.check_testcase(result, "Starting PXC-8.0 cluster node4 for upgrade testing") - self.startup_check(4) - - def sysbench_run(self, node1_socket, db, upgrade_type): - # Sysbench dataload for consistency test - sysbench_node1 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - node1_socket, debug) - # Sanity check for sysbench run - result = sysbench_node1.sanity_check(db) - utility_cmd.check_testcase(result, "Sysbench run sanity check") - # Sysbench dataload - result = sysbench_node1.sysbench_load(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, SYSBENCH_NORMAL_TABLE_SIZE) - utility_cmd.check_testcase(result, "Sysbench data load") - version = utility_cmd.version_check(PXC_LOWER_BASE) # get server version - if int(version) > int("050700"): - if encryption == 'YES': - for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1): - # Enable encryption for normal table - encrypt_table = PXC_LOWER_BASE + '/bin/mysql --user=root ' \ - '--socket=' + WORKDIR + '/node1/mysql.sock -e "' \ - ' alter table ' + db + '.sbtest' + str(i) + \ - " encryption='Y'" \ - '"; > /dev/null 2>&1' - if debug == 'YES': - print(encrypt_table) - os.system(encrypt_table) - sysbench_node2 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - 
WORKDIR + '/node2/mysql.sock') - sysbench_node3 = sysbench_run.SysbenchRun(PXC_LOWER_BASE, WORKDIR, - WORKDIR + '/node3/mysql.sock') - # sysbench read/write run - if upgrade_type == 'readwrite' or upgrade_type == 'readwrite_sst': - result = sysbench_node1.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node1") - result = sysbench_node2.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node2") - result = sysbench_node3.sysbench_oltp_read_write(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench oltp run on node3") - # sysbench readonly run - elif upgrade_type == 'readonly': - result = sysbench_node1.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node1") - result = sysbench_node2.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node2") - result = sysbench_node3.sysbench_oltp_read_only(db, SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, - SYSBENCH_NORMAL_TABLE_SIZE, 1000, 'Yes') - utility_cmd.check_testcase(result, "Initiated sysbench readonly run on node3") +class PXCUpgrade(BaseTest): + def __init__(self): + super().__init__(vers=Version.LOWER) def rolling_replacement(self): # Start PXC cluster for rolling replacement test - for i in range(1, int(NODE) + 1): - shutil.copy(WORKDIR + '/conf/node' + str(int(i + 2)) + '.cnf', - WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - query = PXC_LOWER_BASE + '/bin/mysql --user=root --socket=' + WORKDIR + \ - '/node' + str(int(i + 2)) + '/mysql.sock -Bse"show variables like \'wsrep_cluster_address\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - wsrep_cluster_addr = os.popen(query).read().rstrip() - query = PXC_LOWER_BASE + "/bin/mysql --user=root --socket=" + \ - WORKDIR + '/node' + str(int(i + 2)) + '/mysql.sock -Bse"select @@port" 2>&1' - port_no = os.popen(query).read().rstrip() - wsrep_port_no = int(port_no) + 108 - port_no = int(port_no) + 100 - os.system("sed -i 's#node" + str(int(i + 2)) + - "#node" + str(int(i + 3)) + "#g' " + - WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - os.system("sed -i '/wsrep_sst_auth=root:/d' " + - WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - os.system("sed -i '0,/^[ \\t]*wsrep_cluster_address[ \\t]*=.*$/s|" - "^[ \\t]*wsrep_cluster_address[ \\t]*=.*$|wsrep_cluster_address=" - + wsrep_cluster_addr + "127.0.0.1:" + str(wsrep_port_no) + "|' " - + WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - os.system("sed -i '0,/^[ \\t]*port[ \\t]*=.*$/s|" - "^[ \\t]*port[ \\t]*=.*$|port=" - + str(port_no) + "|' " + WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - os.system('sed -i "0,/^[ \\t]*wsrep_provider_options[ \\t]*=.*$/s|' - "^[ \\t]*wsrep_provider_options[ \\t]*=.*$|wsrep_provider_options=" - "'gmcast.listen_addr=tcp://127.0.0.1:" + str(wsrep_port_no) + - "'|\" " + WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - os.system("sed -i '0,/^[ \\t]*server_id[ \\t]*=.*$/s|" - "^[ \\t]*server_id[ \\t]*=.*$|server_id=" - "14|' " + WORKDIR + '/conf/node' + str(int(i + 3)) + '.cnf') - - 
create_startup = 'sed "s#' + PXC_LOWER_BASE + '#' + PXC_UPPER_BASE + \ - '#g" ' + WORKDIR + '/log/startup' + str(int(i + 2)) + '.sh > ' + \ - WORKDIR + '/log/startup' + str(int(i + 3)) + '.sh' - if debug == 'YES': - print(create_startup) - os.system(create_startup) - os.system("sed -i 's#node" + str(int(i + 2)) + - "#node" + str(int(i + 3)) + "#g' " + WORKDIR + - '/log/startup' + str(int(i + 3)) + '.sh') - os.system("rm -rf " + WORKDIR + '/node' + str(int(i + 3))) - os.mkdir(WORKDIR + '/node' + str(int(i + 3))) - upgrade_startup = "bash " + WORKDIR + \ - '/log/startup' + str(int(i + 3)) + '.sh' - - status_query = BASEDIR + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node1/mysql.sock -Bse"show status like ' \ - "'wsrep_local_state_comment'\" 2>&1 | awk \'{print $2}\'" - wsrep_status = os.popen(status_query).read().rstrip() - print(wsrep_status) - status_query = BASEDIR + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node2/mysql.sock -Bse"show status like ' \ - "'wsrep_local_state_comment'\" 2>&1 | awk \'{print $2}\'" - wsrep_status = os.popen(status_query).read().rstrip() - print(wsrep_status) - status_query = BASEDIR + '/bin/mysql --user=root --socket=' + \ - WORKDIR + '/node3/mysql.sock -Bse"show status like ' \ - "'wsrep_local_state_comment'\" 2>&1 | awk \'{print $2}\'" - wsrep_status = os.popen(status_query).read().rstrip() - print(wsrep_status) - time.sleep(10) - if debug == 'YES': - print(upgrade_startup) - result = os.system(upgrade_startup) - utility_cmd.check_testcase(result, "Starting PXC-8.0 cluster node" + - str(int(i + 3)) + " for upgrade testing") - self.startup_check(int(i + 3)) + node = self.node3 + for i in [4, 5, 6]: + node = pxc_startup.StartCluster.join_new_upgraded_node(node, i, debug) + self.pxc_nodes.append(node) -query = PXC_LOWER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -lower_version = os.popen(query).read().rstrip() -query = PXC_UPPER_BASE + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" -upper_version = os.popen(query).read().rstrip() -version = utility_cmd.version_check(PXC_UPPER_BASE) -print('--------------------------------------------------------------------------------------------') -print("\nPXC Upgrade test : Upgrading from PXC-" + lower_version + " to PXC-" + upper_version) -print('--------------------------------------------------------------------------------------------') -print(datetime.now().strftime("%H:%M:%S ") + " Rolling replacement upgrade without active workload") -print('--------------------------------------------------------------------------------------------') +utility.test_header("PXC Upgrade test : Upgrading from PXC-" + lower_version + " to PXC-" + upper_version) +utility.test_scenario_header("Rolling replacement upgrade without active workload") upgrade_qa = PXCUpgrade() -upgrade_qa.startup() -rqg_dataload = rqg_datagen.RQGDataGen(PXC_LOWER_BASE, WORKDIR, USER, debug) -rqg_dataload.pxc_dataload(WORKDIR + '/node1/mysql.sock') +upgrade_qa.start_pxc() +rqg_dataload = rqg_datagen.RQGDataGen(upgrade_qa.node1, debug) +rqg_dataload.pxc_dataload(workdir) upgrade_qa.rolling_replacement() -utility_cmd.stop_pxc(WORKDIR, BASEDIR, NODE) +upgrade_qa.shutdown_nodes() diff --git a/test/test_startup.py b/test/test_startup.py index d38e5d3..31bde91 100644 --- a/test/test_startup.py +++ b/test/test_startup.py @@ -1,7 +1,6 @@ import configparser import unittest -import pxc_startup -from util import db_connection +from util import db_connection, pxc_startup config = 
configparser.ConfigParser() config.read('config.ini') @@ -9,32 +8,33 @@ workdir = config['config']['workdir'] basedir = config['config']['basedir'] -cluster = pxc_startup.StartCluster(workdir, basedir, 2) -connection_check = db_connection.DbConnection('root', '/tmp/node1.sock') -connection_check.connectioncheck() +cluster = pxc_startup.StartCluster(3, 'YES') +connection_check = db_connection.DbConnection(user='root', socket='/tmp/node1.sock') +connection_check.connection_check() + class TestStartup(unittest.TestCase): - def test_sanitycheck(self): - self.assertEqual(cluster.sanitycheck(), 0, + def test_sanity_check(self): + self.assertEqual(cluster.sanity_check(), 0, 'work/base directory have some issues') print('PXC Sanity check') - def test_initializecluster(self): - self.assertIsNot(cluster.initializecluster(), 1, + def test_initialize_cluster(self): + self.assertIsNot(cluster.initialize_cluster(), 1, 'Could not initialize database directory. ' 'Please check error log') - def test_startcluster(self): - self.assertIsNot(cluster.startcluster(), 1, + def test_start_cluster(self): + self.assertIsNot(cluster.start_cluster(), 1, 'Could not start cluster, ' 'Please check error log') - print('Starting Cluster') + print('Started Cluster') - def test_connectionchecl(self): - self.assertIsNot(connection_check.connectioncheck(), 1, + def test_connection_check(self): + self.assertIsNot(connection_check.connection_check(), 1, 'Could not establish DB connection') - print('Checking DB connection') + print('Checked DB connection') if __name__ == '__main__': diff --git a/util/create_cnf.py b/util/create_cnf.py index 302d296..2ca22ba 100644 --- a/util/create_cnf.py +++ b/util/create_cnf.py @@ -1,18 +1,28 @@ # This will help us to create cluster cnf on the fly import os -import configparser import shutil import random +from config import WORKDIR + +workdir = WORKDIR + +cwd = os.path.dirname(os.path.realpath(__file__)) +parent_dir = os.path.normpath(os.path.join(cwd, '../')) + +pxc_conf = parent_dir + '/conf/pxc.cnf' + + +def node_conf(node_number: int): + return workdir + '/conf/node' + str(node_number) + '.cnf' + class CreateCNF: - def __init__(self, workdir, basedir, node): - self.node = node - self.workdir = workdir - self.basedir = basedir + def __init__(self, number_of_nodes: int): + self.__number_of_nodes = number_of_nodes - def createconfig(self): + def create_config(self): """ Method to create cluster configuration file based on the node count. To create configuration file it will take default values from conf/pxc.cnf. 
@@ -22,15 +32,15 @@ def createconfig(self):
         port = random.randint(10, 50) * 1001
         port_list = []
         addr_list = ''
-        for j in range(1, self.node + 1):
+        for j in range(1, self.__number_of_nodes + 1):
             port_list += [port + (j * 2)]
             addr_list = addr_list + '127.0.0.1:' + str(port + (j * 2) + 2) + ','
-        if not os.path.isfile(self.workdir + '/conf/pxc.cnf'):
-            print('Default pxc.cnf is missing in ' + self.workdir + '/conf')
+        if not os.path.isfile(pxc_conf):
+            print('Default pxc.cnf is missing: ' + pxc_conf)
             return 1
-        for i in range(1, self.node + 1):
-            shutil.copy(self.workdir + '/conf/pxc.cnf', self.workdir + '/conf/node' + str(i) + '.cnf')
-            cnf_name = open(self.workdir + '/conf/node' + str(i) + '.cnf', 'a+')
+        for i in range(1, self.__number_of_nodes + 1):
+            shutil.copy(pxc_conf, node_conf(i))
+            cnf_name = open(node_conf(i), 'a+')
             cnf_name.write('wsrep_cluster_address=gcomm://' + addr_list + '\n')
             cnf_name.write('port=' + str(port_list[i - 1]) + '\n')
             cnf_name.write("wsrep_provider_options='gmcast.listen_addr=tcp://127.0.0.1:" +
@@ -39,9 +49,5 @@ def createconfig(self):
         return 0
 
-config = configparser.ConfigParser()
-config.read('config.ini')
-workdir = config['config']['workdir']
-basedir = config['config']['basedir']
-cnf_file = CreateCNF(workdir, basedir, 2)
-cnf_file.createconfig()
\ No newline at end of file
+cnf_file = CreateCNF(2)
+cnf_file.create_config()
diff --git a/util/createsql.py b/util/createsql.py
index 0a92186..13033c4 100644
--- a/util/createsql.py
+++ b/util/createsql.py
@@ -14,26 +14,27 @@
 varchar_count = [32, 64, 126, 256, 1024]
 
 
+def opt_selection(myextra):
+    if myextra == "pk":
+        return "PRIMARY KEY"
+    elif myextra == "uk":
+        return "UNIQUE"
+    else:
+        return ""
+
+
 class GenerateSQL:
     def __init__(self, filename, lines):
-        self.filename = filename
+        self.filename = "/tmp/" + filename
         self.lines = lines
         self.table_count = random.randint(1, len(table_names))
         self.column_count = random.randint(1, len(column_names))
         self.insert_sql_count = int(((self.lines / self.table_count) - 1))
 
-    def OutFile(self):
+    def out_file(self):
         sys.stdout = open(self.filename, "w")
 
-    def OptSelection(self, myextra):
-        if myextra == "pk":
-            return "PRIMARY KEY"
-        elif myextra == "uk":
-            return "UNIQUE"
-        else:
-            return ""
-
-    def CreateTable(self):
+    def create_table(self):
         # Create table with random data.
         for i in range(self.table_count):
             data_types = ""
@@ -63,7 +64,7 @@ def CreateTable(self):
             data_value = data_value[:-2]
             print("INSERT INTO " + table_name + " values (" + data_value + ");")
 
-    def DropTable(self):
+    def drop_table(self):
         for i in range(self.table_count):
             table_name = table_names[i]
             print("DROP TABLE IF EXISTS " + table_name + ";")
diff --git a/util/data_generator.py b/util/data_generator.py
index dce4344..8885b89 100644
--- a/util/data_generator.py
+++ b/util/data_generator.py
@@ -47,10 +47,9 @@ def usage():
 LINE_COUNT = lines
 
 # Generate random data
-OUTFILE = "/tmp/" + outfile
-generate_sql = createsql.GenerateSQL(OUTFILE, LINE_COUNT)
-generate_sql.OutFile()
-generate_sql.CreateTable()
-generate_sql.DropTable()
+generate_sql = createsql.GenerateSQL(outfile, LINE_COUNT)
+generate_sql.out_file()
+generate_sql.create_table()
+generate_sql.drop_table()
 sys.stdout = sys.__stdout__
-print("DONE! Generated " + OUTFILE)
+print("DONE!
Generated " + outfile) diff --git a/util/db_connection.py b/util/db_connection.py index 82703b8..949ade1 100644 --- a/util/db_connection.py +++ b/util/db_connection.py @@ -1,28 +1,261 @@ +import sys +from datetime import datetime + import mysql.connector +from _mysql_connector import MySQLInterfaceError class DbConnection: - def __init__(self, user, socket): - self.user = user - self.socket = socket + def __init__(self, user, password=None, host='localhost', port=None, socket=None, node_num: int = 1, data_dir=None, + conf_file=None, err_log=None, base_dir=None, startup_script=None, debug='No'): + self.__user = user + self.__socket = socket + self.__data_dir = data_dir + self.__conf_file = conf_file + self.__err_log = err_log + self.__debug = debug + self.__base_dir = base_dir + self.__startup_script = startup_script + self.__node_num = node_num + self.__host = host + self.__port = port + self.__password = password + + def connect(self): + if self.__socket is None: + return mysql.connector.connect(host=self.__host, port=self.__port, user=self.__user, + password=self.__password) + else: + return mysql.connector.connect(host=self.__host, unix_socket=self.__socket, user=self.__user) - def connection_check(self): + def connection_check(self, log_error_on_failure: bool = True): """ Method to test the cluster database connection. - Since we are initializing the cluster using - --initialize-insecure option we can login - to database using default user (username : root) - without password. """ - # Database connection string - connection = mysql.connector.connect(host='localhost', user=self.user, unix_socket=self.socket) + connection = None try: + # Database connection string + connection = self.connect() if connection.is_connected(): - # db_info = connection.get_server_info() return 0 - except Exception as e: - print("Error while connecting to MySQL", e) + except Exception as mysql_connection_error: + if log_error_on_failure: + print("Error while opening connection to server " + str(mysql_connection_error)) return 1 finally: # closing database connection. - if connection.is_connected(): + if connection is not None and connection.is_connected(): connection.close() + + def test_connection_check(self): + result = self.connection_check() + # print testcase status based on success/failure output. + now = datetime.now().strftime("%H:%M:%S ") + if result == 0: + print(now + ' ' + f'{"connection_check":100}' + '[ \u2713 ]') + else: + print(now + ' ' + f'{"connection_check":100}' + '[ \u2717 ]') + exit(1) + + def execute(self, query: str, connection=None, log_query=True): + cnx = None + try: + if connection is not None: + print("using existing connection") + cnx = connection + else: + cnx = DbConnection.connect(self) + if self.__debug == 'YES' and log_query: + print(query) + cursor = cnx.cursor() + cursor.execute(query) + return cursor + finally: + # closing database connection. 
+            if connection is None:
+                if cnx is not None and cnx.is_connected():
+                    cnx.close()
+            if connection is not None:
+                print("not closing existing connection")
+
+    def execute_queries(self, queries: list[str]):
+        cnx = None
+        try:
+            if self.__debug == 'YES':
+                print("Queries to execute :")
+                for query in queries:
+                    print(query)
+            cnx = DbConnection.connect(self)
+            cursor = cnx.cursor(buffered=True)
+            for query in queries:
+                cursor.execute(query)
+        finally:
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def execute_get_value(self, query: str, retries: int = 0):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor(buffered=True)
+            cursor.execute(query)
+            row = cursor.fetchone()
+            if self.__debug == 'YES':
+                print(row[0])
+            return row[0]
+        except MySQLInterfaceError as mysqlInterfaceError:
+            if retries > 0:
+                print("Retrying, retries left: " + str(retries))
+                return self.execute_get_value(query, retries - 1)
+            else:
+                raise Exception(str(mysqlInterfaceError))
+        finally:
+            # closing database connection.
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def execute_get_values(self, query: str):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor(buffered=True)
+            cursor.execute(query)
+            records = cursor.fetchall()
+            print("Number of rows: ", cursor.rowcount)
+            if self.__debug == 'YES':
+                print("Table rows : " + str(records))
+            return records
+        finally:
+            # closing database connection.
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def execute_get_row(self, query: str):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor()
+            cursor.execute(query)
+            records = cursor.fetchall()
+            if self.__debug == 'YES':
+                print("Total number of rows in table: ", cursor.rowcount)
+            return records[0]
+        finally:
+            # closing database connection.
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def get_column_value(self, query: str, column: str):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor(buffered=True, dictionary=True)
+            cursor.execute(query)
+            row = cursor.fetchone()
+            if self.__debug == 'YES':
+                print(row[column])
+            return row[column]
+        finally:
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def shutdown(self):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor()
+            cursor.execute("shutdown")
+            print("Shutdown done for node" + str(self.__node_num))
+            return 0
+        except Exception as e:
+            print("Error while connecting to MySQL/Shutting down", e)
+            return 0
+        finally:
+            # closing database connection.
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def execute_query_from_file(self, file_path, multi=True):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor()
+            with open(file_path, "r") as file:
+                sql_commands = file.read()
+            cursor.execute(sql_commands, multi=multi)
+            print("Execution of queries completed successfully.")
+        except Exception as e:
+            print("An error occurred while executing queries from file " + file_path + ": {}".format(e))
+            sys.exit(1)
+        finally:
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    # Execute multi-line queries from a file. Each query must be separated from the others by the $$ delimiter.
+    def execute_queries_from_file(self, file_path):
+        # Open and read the file as a single buffer
+        with open(file_path, "r") as file:
+            sql_file = file.read()
+        # SQL commands
+        sql_command_set = sql_file.split('$$')
+
+        # Execute every command from the input file
+        for command in sql_command_set:
+            cnx = self.connect()
+            cursor = cnx.cursor(buffered=True)
+            try:
+                if command.rstrip() != '':
+                    cursor.execute(command)
+            except ValueError as msg:
+                # Skip and report error
+                print("Command skipped: ", msg)
+            except Exception as e:
+                print("Executing query ", command, "failed due to exception", str(e))
+            finally:
+                cursor.close()
+                cnx.close()
+        if self.__debug == 'YES':
+            print("Execution of queries from the file ", file_path, "is done")
+
+    def call_proc(self, proc: str, args: list[str]):
+        cnx = None
+        try:
+            cnx = self.connect()
+            cursor = cnx.cursor()
+            cursor.callproc(proc, args=args)
+            print("Execution of stored procedure call completed successfully.")
+        except Exception as e:
+            print("An error occurred while executing stored procedure " + proc + ": {}".format(e))
+            sys.exit(1)
+        finally:
+            if cnx is not None and cnx.is_connected():
+                cnx.close()
+
+    def get_port(self):
+        return self.execute_get_value("select @@port")
+
+    def get_user(self):
+        return self.__user
+
+    def get_socket(self):
+        return self.__socket
+
+    def get_data_dir(self):
+        return self.__data_dir
+
+    def get_conf_file(self):
+        return self.__conf_file
+
+    def get_startup_script(self):
+        return self.__startup_script
+
+    def get_error_log(self):
+        return self.__err_log
+
+    def get_base_dir(self):
+        return self.__base_dir
+
+    def get_node_number(self):
+        return self.__node_num
diff --git a/util/executesql.py b/util/executesql.py
new file mode 100644
index 0000000..3bbc642
--- /dev/null
+++ b/util/executesql.py
@@ -0,0 +1,73 @@
+import random
+from util import datagen
+from util import db_connection
+
+# data_type List
+data_type = ['int', 'bigint', 'char', 'varchar', 'date', 'float', 'double', 'text', 'time', 'timestamp']
+# CREATE TABLE extra options list
+key_type = ['pk', 'uk']
+# Table name List
+table_names = ['t1', 't2', 't3', 't4', 't5', 't6', 't7', 't8', 't9', 't10']
+# Column name list
+column_names = ['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8', 'c9', 'c10']
+# Char count list
+varchar_count = [32, 64, 126, 256, 1024]
+
+
+def opt_selection(myextra):
+    if myextra == "pk":
+        return "PRIMARY KEY"
+    elif myextra == "uk":
+        return "UNIQUE"
+    else:
+        return ""
+
+
+class GenerateSQL:
+    def __init__(self, node: db_connection.DbConnection, db, lines):
+        self.node = node
+        self.db = db
+        self.lines = lines
+        self.table_count = random.randint(1, len(table_names))
+        self.column_count = random.randint(1, len(column_names))
+        self.insert_sql_count = int(((self.lines / self.table_count) - 1))
+
+    def create_table(self):
+        # Create table with random data.
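+        # A random number of tables is generated, each with randomly chosen
+        # column types; c1 always serves as the primary key (text columns
+        # get a 10-character prefix index).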
+ for i in range(self.table_count): + data_types = "" + index_length = "" + typearray = [] + table_name = table_names[i] + for j in range(self.column_count): + column_description = random.choice(data_type) + typearray.append(column_description) + if j == 0: + if column_description == "text": + index_length = "(10)" + if column_description == "char": + column_description = column_description + " (1)" + if column_description == "varchar": + if j == 0: + varchar_count.remove(1024) + column_description = column_description + " (" + format(random.choice(varchar_count)) + ")" + if j == 0: + varchar_count.append(1024) + if column_description == "timestamp": + column_description = column_description + " DEFAULT CURRENT_TIMESTAMP " + data_types += column_names[j] + " " + column_description + ", " + self.node.execute("CREATE TABLE IF NOT EXISTS " + self.db + "." + table_name + "( " + data_types + + " primary key (c1" + index_length + ") );") + for j in range(self.insert_sql_count): + data_value = "" + for column_description in typearray: + text = datagen.DataGenerator(column_description) + data_value += "'" + text.getData() + "', " + data_value = data_value[:-2] + self.node.execute("INSERT INTO " + self.db + "." + table_name + " values (" + data_value + ");", + log_query=False) + + def drop_table(self): + for i in range(self.table_count): + table_name = table_names[i] + self.node.execute("DROP TABLE IF EXISTS " + self.db + "." + table_name + ";", log_query=False) diff --git a/util/prepared_statements.sql b/util/prepared_statements.sql index 4c4d9f3..254fb88 100644 --- a/util/prepared_statements.sql +++ b/util/prepared_statements.sql @@ -1,12 +1,5 @@ --- create test database for prepared statement --- ------------------------------------------- -DROP DATABASE IF EXISTS pstest; CREATE DATABASE pstest; -use pstest; - --- prepared statement for table creation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_CREATE $$ -CREATE PROCEDURE PS_CREATE() BEGIN +DROP DATABASE IF EXISTS pstest; CREATE DATABASE pstest $$ +CREATE PROCEDURE pstest.PS_CREATE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -15,15 +8,9 @@ CREATE PROCEDURE PS_CREATE() BEGIN EXECUTE stmt1; SET a=a+1; END WHILE; -END $$ -DELIMITER ; - -CALL PS_CREATE(); - --- prepared statement for index creation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_INDEX $$ -CREATE PROCEDURE PS_INDEX() BEGIN +END$$ +CALL pstest.PS_CREATE()$$ +CREATE PROCEDURE pstest.PS_INDEX() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -33,14 +20,8 @@ CREATE PROCEDURE PS_INDEX() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_INDEX(); - --- prepared statement for insert operation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_INSERT $$ -CREATE PROCEDURE PS_INSERT() BEGIN +CALL pstest.PS_INDEX();$$ +CREATE PROCEDURE pstest.PS_INSERT() BEGIN DECLARE create_start INT DEFAULT 1; DECLARE insert_start INT DEFAULT 1; DECLARE create_count INT DEFAULT 10; @@ -58,31 +39,19 @@ CREATE PROCEDURE PS_INSERT() BEGIN SET insert_start = 1; END WHILE; END $$ -DELIMITER ; - -CALL PS_INSERT(); - --- prepared statement for delete operation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_DELETE $$ -CREATE PROCEDURE PS_DELETE() BEGIN +CALL pstest.PS_INSERT();$$ +CREATE PROCEDURE pstest.PS_DELETE(IN row_count INT) BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); - SET @s = concat("DELETE FROM ",@tbl ," ORDER BY RAND() LIMIT 45"); + SET @s = concat("DELETE FROM ",@tbl ," ORDER BY RAND() LIMIT ",row_count); PREPARE 
stmt1 FROM @s; EXECUTE stmt1; SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_DELETE(); - --- prepared statement for analyze table -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_ANALYZE $$ -CREATE PROCEDURE PS_ANALYZE() BEGIN +CALL pstest.PS_DELETE(45) $$ +CREATE PROCEDURE pstest.PS_ANALYZE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -92,31 +61,9 @@ CREATE PROCEDURE PS_ANALYZE() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_ANALYZE(); - --- prepared statement for delete operation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_DELETE $$ -CREATE PROCEDURE PS_DELETE() BEGIN - DECLARE a INT Default 1 ; - WHILE a <= 10 DO - SET @tbl = concat("tbl",a); - SET @s = concat("DELETE FROM ",@tbl ," ORDER BY RAND() LIMIT 35"); - PREPARE stmt1 FROM @s; - EXECUTE stmt1; - SET a=a+1; - END WHILE; -END $$ -DELIMITER ; - -CALL PS_DELETE(); - --- prepared statement for optimize table -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_OPT_TABLE $$ -CREATE PROCEDURE PS_OPT_TABLE() BEGIN +CALL pstest.PS_ANALYZE()$$ +CALL pstest.PS_DELETE(35)$$ +CREATE PROCEDURE pstest.PS_OPT_TABLE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -126,14 +73,8 @@ CREATE PROCEDURE PS_OPT_TABLE() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_OPT_TABLE(); - --- prepared statement for update operation -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_UPDATE $$ -CREATE PROCEDURE PS_UPDATE() BEGIN +CALL pstest.PS_OPT_TABLE()$$ +CREATE PROCEDURE pstest.PS_UPDATE() BEGIN DECLARE create_start INT DEFAULT 1; DECLARE update_start INT DEFAULT 1; DECLARE create_count INT DEFAULT 10; @@ -151,14 +92,8 @@ CREATE PROCEDURE PS_UPDATE() BEGIN SET update_start = 1; END WHILE; END $$ -DELIMITER ; - -CALL PS_UPDATE(); - --- prepared statement for repair table -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_RPR_TABLE $$ -CREATE PROCEDURE PS_RPR_TABLE() BEGIN +CALL pstest.PS_UPDATE() $$ +CREATE PROCEDURE pstest.PS_RPR_TABLE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -168,14 +103,8 @@ CREATE PROCEDURE PS_RPR_TABLE() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_RPR_TABLE(); - --- prepared statement for drop index -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_DROP_INDEX $$ -CREATE PROCEDURE PS_DROP_INDEX() BEGIN +CALL pstest.PS_RPR_TABLE()$$ +CREATE PROCEDURE pstest.PS_DROP_INDEX() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -185,14 +114,8 @@ CREATE PROCEDURE PS_DROP_INDEX() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_DROP_INDEX(); - --- prepared statement for truncate table -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_TRUNCATE $$ -CREATE PROCEDURE PS_TRUNCATE() BEGIN +CALL pstest.PS_DROP_INDEX() $$ +CREATE PROCEDURE pstest.PS_TRUNCATE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -202,14 +125,8 @@ CREATE PROCEDURE PS_TRUNCATE() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_TRUNCATE(); - --- prepared statement for drop table -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_DROP_TABLE $$ -CREATE PROCEDURE PS_DROP_TABLE() BEGIN +CALL pstest.PS_TRUNCATE()$$ +CREATE PROCEDURE pstest.PS_DROP_TABLE() BEGIN DECLARE a INT Default 1 ; WHILE a <= 10 DO SET @tbl = concat("tbl",a); @@ -219,26 +136,21 @@ CREATE PROCEDURE PS_DROP_TABLE() BEGIN SET a=a+1; END WHILE; END $$ -DELIMITER ; - -CALL PS_DROP_TABLE(); - --- prepared statement for create user -DELIMITER $$ -DROP PROCEDURE IF EXISTS PS_CREATE_USER $$ -CREATE PROCEDURE PS_CREATE_USER() BEGIN +CALL 
pstest.PS_DROP_TABLE()$$
+CREATE PROCEDURE pstest.PS_CREATE_USER() BEGIN
     DECLARE a INT Default 1 ;
     WHILE a <= 10 DO
       SET @tuser = concat("testuser",a);
+      SET @c = concat("DROP USER IF EXISTS ",@tuser,"@'%'");
       SET @s = concat("CREATE USER ",@tuser,"@'%' IDENTIFIED BY 'test123'");
       SET @t = concat("GRANT ALL ON *.* TO ",@tuser,"@'%'");
-      PREPARE stmt1 FROM @s;
-      PREPARE stmt2 FROM @t;
+      PREPARE stmt1 FROM @c;
+      PREPARE stmt2 FROM @s;
+      PREPARE stmt3 FROM @t;
       EXECUTE stmt1;
       EXECUTE stmt2;
+      EXECUTE stmt3;
       SET a=a+1;
     END WHILE;
 END $$
-DELIMITER ;
-
-CALL PS_CREATE_USER();
+CALL pstest.PS_CREATE_USER()$$
diff --git a/util/ps_startup.py b/util/ps_startup.py
index 93df2b0..877fad8 100644
--- a/util/ps_startup.py
+++ b/util/ps_startup.py
@@ -1,49 +1,97 @@
 #!/usr/bin/env python
 # Created by Ramesh Sivaraman, Percona LLC.
+# Updated by Parveez Baig
 # This will help us to start Percona Server
 import os
 import subprocess
 import random
 import shutil
-import time
-from util import sanity
+
+from config import WORKDIR, BASEDIR, PXC_LOWER_BASE, PXC_UPPER_BASE, USER
+from util import utility, db_connection
+from util.utility import Version, Utility
+
+workdir = WORKDIR
+base_dir = BASEDIR
+user = USER
+
+cwd = os.path.dirname(os.path.realpath(__file__))
+parent_dir = os.path.normpath(os.path.join(cwd, '../'))
+
+
+# Default PS configuration file locations
+default_conf = parent_dir + '/conf/ps.cnf'
+default_custom_conf = parent_dir + '/conf/custom.cnf'
+default_encryption_conf = parent_dir + '/conf/encryption.cnf'
+workdir_custom_conf = workdir + '/conf/custom.cnf'
+workdir_encryption_conf = workdir + '/conf/encryption.cnf'
+
+
+def node_conf(i: int):
+    return workdir + '/conf/ps' + str(i) + '.cnf'
+
+
+def node_datadir(i: int):
+    return workdir + '/psnode' + str(i)
+
+
+def node_err_log(i: int):
+    return workdir + '/log/psnode' + str(i) + '.err'
+
+
+def node_socket(i: int):
+    return workdir + '/psnode' + str(i) + '/mysql.sock'
+
+
+def init_log(i: int):
+    return workdir + '/log/ps_init' + str(i) + '.log'
+
+
+def set_base_dir(server_version: Version):
+    global base_dir
+    base_dir = BASEDIR
+    if server_version == Version.LOWER:
+        base_dir = PXC_LOWER_BASE
+    elif server_version == Version.HIGHER:
+        base_dir = PXC_UPPER_BASE
 
 
 class StartPerconaServer:
-    def __init__(self, scriptdir, workdir, basedir, node, debug):
-        self.scriptdir = scriptdir
-        self.workdir = workdir
-        self.basedir = basedir
-        self.node = node
-        self.debug = debug
-
-    def sanity_check(self):
+    def __init__(self, number_of_nodes, debug, server_version: Version = None):
+        self.__number_of_nodes = number_of_nodes
+        self.__debug = debug
+        if server_version is not None:
+            set_base_dir(server_version)
+
+    def test_sanity_check(self):
        """ Sanity check method will remove existing
            data directory and forcefully kill
            running PS mysqld processes. This will
            also check the availability of mysqld binary file.
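+            The outcome is reported through check_testcase instead of
+            being returned to the caller.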
""" # kill existing mysqld process - os.system("ps -ef | grep '" + self.workdir + "/conf/ps[0-9].cnf'" - " | grep -v grep | awk '{print $2}' | xargs kill -9 >/dev/null 2>&1") + os.system("ps -ef | grep '" + workdir + "/conf/ps[0-9].cnf'" + " | grep -v grep | awk '{print $2}' | xargs kill -9 >/dev/null 2>&1") + result = 0 # Create log directory - if not os.path.exists(self.workdir + '/log'): - os.mkdir(self.workdir + '/log') + if not os.path.exists(workdir + '/log'): + os.mkdir(workdir + '/log') # Create configuration directory - if not os.path.exists(self.workdir + '/conf'): - os.mkdir(self.workdir + '/conf') + if not os.path.exists(workdir + '/conf'): + os.mkdir(workdir + '/conf') # Check mysqld file - if not os.path.isfile(self.basedir + '/bin/mysqld'): - print(self.basedir + '/bin/mysqld does not exist') - return 1 - return 0 + if not os.path.isfile(base_dir + '/bin/mysqld'): + print(base_dir + '/bin/mysqld does not exist') + result = 1 + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "PS: Startup sanity check") # This method will help us to check PS version def version_check(self): # Database version check - version_info = os.popen(self.basedir + "/bin/mysqld --version 2>&1 | " - "grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1").read() + version_info = os.popen(base_dir + "/bin/mysqld --version 2>&1 | " + "grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1").read() version = "{:02d}{:02d}{:02d}".format(int(version_info.split('.')[0]), int(version_info.split('.')[1]), int(version_info.split('.')[2])) @@ -56,109 +104,109 @@ def create_config(self, conf_extra=None): For customised configuration please add your values in conf/custom.conf. """ - version = sanity.version_check(self.basedir) # Get server version + version = Utility.version_check(base_dir) # Get server version port = random.randint(21, 30) * 1004 port_list = [] - for j in range(1, self.node + 1): + result = 0 + for j in range(1, self.__number_of_nodes + 1): port_list += [port + (j * 100)] - # Create PS configuration file - if not os.path.isfile(self.scriptdir + '/conf/ps.cnf'): - print('Default pxc.cnf is missing in ' + self.scriptdir + '/conf') - return 1 + if not os.path.isfile(default_conf): + print('Default pxc.cnf is missing ' + default_conf) + result = 1 else: - shutil.copy(self.scriptdir + '/conf/custom.cnf', self.workdir + '/conf/custom.cnf') + shutil.copy(default_custom_conf, workdir_custom_conf) # Add custom mysqld options in configuration file - for i in range(1, self.node + 1): - shutil.copy(self.scriptdir + '/conf/ps.cnf', self.workdir + '/conf/ps' + str(i) + '.cnf') - cnf_name = open(self.workdir + '/conf/ps' + str(i) + '.cnf', 'a+') + for i in range(1, self.__number_of_nodes + 1): + conf = node_conf(i) + shutil.copy(default_conf, conf) + cnf_name = open(conf, 'a+') cnf_name.write('\nport=' + str(port_list[i - 1]) + '\n') if int(version) > int("050700"): cnf_name.write('log_error_verbosity=3\n') - cnf_name.write('socket=/tmp/psnode' + str(i) + '.sock\n') + cnf_name.write('socket=' + node_socket(i) + '\n') cnf_name.write('server_id=' + str(100 + i) + '\n') - cnf_name.write('!include ' + self.workdir + '/conf/custom.cnf\n') + cnf_name.write('!include ' + workdir_custom_conf + '\n') if conf_extra == 'encryption': - shutil.copy(self.scriptdir + '/conf/encryption.cnf', - self.workdir + '/conf/encryption.cnf') - cnf_name.write('!include ' + self.workdir + '/conf/encryption.cnf\n') + shutil.copy(default_encryption_conf, workdir_encryption_conf) + cnf_name.write('!include ' + workdir_encryption_conf 
+ '\n') cnf_name.close() - return 0 + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "PS: Configuration file creation") def add_myextra_configuration(self, config_file): """ Adding extra configurations based on the testcase """ + result = 0 if not os.path.isfile(config_file): print('Custom config ' + config_file + ' is missing') - return 1 + result = 1 # Add custom configurations config_file = config_file - cnf_name = open(self.workdir + '/conf/custom.cnf', 'a+') + cnf_name = open(workdir_custom_conf, 'a+') cnf_name.write('\n') cnf_name.write('!include ' + config_file + '\n') cnf_name.close() - return 0 + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "PS: Adding custom configuration") - def initialize_cluster(self): - """ Method to initialize the cluster database - directories. This will initialize the cluster + def initialize_server(self): + """ Method to initialize the server database + directories. This will initialize the server using --initialize-insecure option for passwordless authentication. """ result = 1 # return value - for i in range(1, self.node + 1): - if os.path.exists(self.workdir + '/psnode' + str(i)): - os.system('rm -rf ' + self.workdir + '/psnode' + str(i) + ' >/dev/null 2>&1') - if not os.path.isfile(self.workdir + '/conf/ps' + str(i) + '.cnf'): - print('Could not find config file /conf/ps' + str(i) + '.cnf') + for i in range(1, self.__number_of_nodes + 1): + conf = node_conf(i) + datadir = node_datadir(i) + if os.path.exists(datadir): + os.system('rm -rf ' + datadir + ' >/dev/null 2>&1') + if not os.path.isfile(conf): + print('Could not find config file ' + conf) exit(1) - version = self.version_check() # Get server version + version = self.version_check() # Get server version + initialize_log = init_log(i) # Initialize data directory if int(version) < int("050700"): - os.mkdir(self.workdir + '/psnode' + str(i)) - initialize_node = self.basedir + '/scripts/mysql_install_db --no-defaults ' \ - '--basedir=' + self.basedir + ' --datadir=' + \ - self.workdir + '/psnode' + str(i) + ' > ' + \ - self.workdir + '/log/ps_startup' + str(i) + '.log 2>&1' + os.mkdir(datadir) + initialize_node = (base_dir + '/scripts/mysql_install_db --no-defaults --basedir=' + base_dir + + ' --datadir=' + datadir + ' > ' + initialize_log + ' 2>&1') else: - initialize_node = self.basedir + '/bin/mysqld --no-defaults ' \ - ' --initialize-insecure --basedir=' + self.basedir + \ - ' --datadir=' + self.workdir + '/psnode' + str(i) + ' > ' + \ - self.workdir + '/log/ps_startup' + str(i) + '.log 2>&1' - if self.debug == 'YES': + initialize_node = (base_dir + '/bin/mysqld --no-defaults --initialize-insecure --basedir=' + base_dir + + ' --datadir=' + datadir + ' > ' + initialize_log + ' 2>&1') + if self.__debug == 'YES': print(initialize_node) run_query = subprocess.call(initialize_node, shell=True, stderr=subprocess.DEVNULL) result = ("{}".format(run_query)) - return int(result) + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(int(result), "PS: Initializing PS server") - def start_server(self, my_extra=None): + def start_server(self, my_extra=None, verify_startup: bool = True): """ Method to start the cluster nodes. This method will also check the startup status. 
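+            Returns a list of DbConnection objects, one per started node.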
""" - ping_status = 1 # return value if my_extra is None: my_extra = '' - for i in range(1, self.node + 1): + ps_nodes = [] + for i in range(1, self.__number_of_nodes + 1): + socket = node_socket(i) + conf = node_conf(i) + err_log = node_err_log(i) + mysqld = base_dir + '/bin/mysqld' + datadir = node_datadir(i) # Start server - startup = self.basedir + '/bin/mysqld --defaults-file=' + self.workdir + \ - '/conf/ps' + str(i) + '.cnf --datadir=' + self.workdir + '/psnode' + str(i) + \ - ' --basedir=' + self.basedir + ' ' + my_extra + \ - ' --log-error=' + self.workdir + \ - '/log/psnode' + str(i) + '.err > ' + self.workdir + \ - '/log/psnode' + str(i) + '.err 2>&1 &' - if self.debug == 'YES': + startup = (mysqld + ' --defaults-file=' + conf + ' --datadir=' + datadir + ' --basedir=' + base_dir + + ' ' + my_extra + ' --log-error=' + err_log + ' > ' + err_log + ' 2>&1 &') + if self.__debug == 'YES': print(startup) - run_cmd = subprocess.call(startup, shell=True, stderr=subprocess.DEVNULL) - result = ("{}".format(run_cmd)) - ping_query = self.basedir + '/bin/mysqladmin --user=root ' \ - '--socket=/tmp/psnode' + str(i) + \ - '.sock ping > /dev/null 2>&1' - for startup_timer in range(120): - time.sleep(1) - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - break # break the loop if mysqld is running - - return int(ping_status) + subprocess.call(startup, shell=True, stderr=subprocess.DEVNULL) + utility_cmd = utility.Utility(self.__debug) + node = db_connection.DbConnection(user=user, socket=socket, node_num=i, data_dir=datadir, conf_file=conf, + err_log=err_log, base_dir=base_dir, debug=self.__debug) + if verify_startup: + utility_cmd.startup_check(node) + ps_nodes.append(node) + + return ps_nodes diff --git a/util/pxc_startup.py b/util/pxc_startup.py index 009e6a1..c0d0b01 100644 --- a/util/pxc_startup.py +++ b/util/pxc_startup.py @@ -1,22 +1,92 @@ #!/usr/bin/env python # Created by Ramesh Sivaraman, Percona LLC. -# This will help us to start Percona XtraDB Cluster +# Updated by Parveez Baig +# This will help us to start Percona XtraDB Cluster, Upgrade cluster nodes, Backup the cluster nodes. 
 import os
 import subprocess
 import random
 import shutil
 import time
-from util import sanity
+from distutils.spawn import find_executable
+
+from util import sanity, utility, sysbench_run
+from util import db_connection
+from config import *
+from util.db_connection import DbConnection
+from util.utility import Version, Utility
+
+workdir = WORKDIR
+base_dir = BASEDIR
+user = USER
+
+higher_version_basedir = PXC_UPPER_BASE
+lower_base_dir = PXC_LOWER_BASE
+DEFAULT_SERVER_UP_TIMEOUT = 300
+backup_dir = workdir + "/backup"
+
+cwd = os.path.dirname(os.path.realpath(__file__))
+parent_dir = os.path.normpath(os.path.join(cwd, '../'))
+
+default_pxc_cnf = parent_dir + '/conf/pxc.cnf'
+default_custom_cnf = parent_dir + '/conf/custom.cnf'
+encryption_cnf = parent_dir + '/conf/encryption.cnf'
+workdir_custom_cnf = workdir + '/conf/custom.cnf'
+workdir_encryption_cnf = workdir + '/conf/encryption.cnf'
+
+
+def set_base_dir(server_version: Version):
+    global base_dir
+    base_dir = BASEDIR
+    if server_version == Version.LOWER:
+        base_dir = PXC_LOWER_BASE
+    elif server_version == Version.HIGHER:
+        base_dir = PXC_UPPER_BASE
+
+
+def node_conf(node_number: int):
+    return workdir + '/conf/node' + str(node_number) + '.cnf'
+
+
+def node_socket(node_number: int):
+    return workdir + '/node' + str(node_number) + '/mysql.sock'
+
+
+def node_err_log(node_number: int):
+    return workdir + '/log/node' + str(node_number) + '.err'
+
+
+def node_datadir(node_number: int):
+    return workdir + '/node' + str(node_number)
+
+
+def node_startup_script(node_number: int):
+    return workdir + '/log/startup' + str(node_number) + '.sh'
+
+
+def init_log(node_number: int):
+    return workdir + '/log/init' + str(node_number) + '.log'
+
+
+def add_conf(option_values: dict):
+    cnf_name = open(workdir_custom_cnf, 'a+')
+    for opt in option_values:
+        cnf_name.write(opt + '=' + option_values[opt] + '\n')
+    cnf_name.close()


 class StartCluster:
-    def __init__(self, scriptdir, workdir, basedir, node, debug):
-        self.scriptdir = scriptdir
-        self.workdir = workdir
-        self.basedir = basedir
-        self.node = node
-        self.debug = debug
+    def __init__(self, number_of_nodes, debug, server_version: Version = None):
+        self.__number_of_nodes = int(number_of_nodes)
+        self.__debug = debug
+        if server_version is not None:
+            set_base_dir(server_version)
+
+    @staticmethod
+    def kill_mysqld():
+        # kill existing mysqld process
+        os.system("ps -ef | grep '" + workdir + "/conf/node[0-9].cnf' | grep -v grep | "
+                  "awk '{print $2}' | xargs kill -9 >/dev/null 2>&1")

     def sanity_check(self):
         """ Sanity check method will remove existing
@@ -24,21 +94,24 @@ def sanity_check(self):
             running mysqld processes. This will also check
             the availability of mysqld binary file.
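+            Unlike the previous version, the result is now
+            reported through utility.Utility.check_testcase()
+            instead of a 0/1 return value.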
""" + result = 0 # kill existing mysqld process - os.system("ps -ef | grep '" + self.workdir + "/conf/node[0-9].cnf' | grep -v grep | " - "awk '{print $2}' | xargs kill -9 >/dev/null 2>&1") - if not os.path.exists(self.workdir + '/log'): - os.mkdir(self.workdir + '/log') + self.kill_mysqld() + + if not os.path.exists(workdir + '/log'): + os.mkdir(workdir + '/log') - if not os.path.exists(self.workdir + '/conf'): - os.mkdir(self.workdir + '/conf') + if not os.path.exists(workdir + '/conf'): + os.mkdir(workdir + '/conf') - if not os.path.isfile(self.basedir + '/bin/mysqld'): - print(self.basedir + '/bin/mysqld does not exist') - return 1 - return 0 + if not os.path.isfile(base_dir + '/bin/mysqld'): + print(base_dir + '/bin/mysqld does not exist') + result = 1 + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "Startup sanity check") - def create_config(self, wsrep_extra, wsrep_provider_option=None): + def create_config(self, wsrep_extra, wsrep_provider_option=None, set_admin_address: bool = False, + custom_conf_settings: dict = None, default_encryption_conf: bool = True): """ Method to create cluster configuration file based on the node count. To create configuration file it will take default values from conf/pxc.cnf. @@ -47,23 +120,26 @@ def create_config(self, wsrep_extra, wsrep_provider_option=None): """ if wsrep_provider_option is None: wsrep_provider_option = '' - version = sanity.version_check(self.basedir) + version = Utility.version_check(base_dir) port = random.randint(10, 19) * 1000 port_list = [] addr_list = '' - for j in range(1, int(self.node) + 1): + result = 0 + for j in range(1, self.__number_of_nodes + 1): port_list += [port + (j * 100)] addr_list = addr_list + '127.0.0.1:' + str(port + (j * 100) + 8) + ',' - if not os.path.isfile(self.scriptdir + '/conf/pxc.cnf'): - print('Default pxc.cnf is missing in ' + self.scriptdir + '/conf') - return 1 + if not os.path.isfile(default_pxc_cnf): + print('Default pxc.cnf is missing in ' + default_pxc_cnf) + result = 1 else: - shutil.copy(self.scriptdir + '/conf/custom.cnf', self.workdir + '/conf/custom.cnf') - for i in range(1, self.node + 1): - shutil.copy(self.scriptdir + '/conf/pxc.cnf', - self.workdir + '/conf/node' + str(i) + '.cnf') - cnf_name = open(self.workdir + '/conf/node' + str(i) + '.cnf', 'a+') - if self.debug == 'YES': + shutil.copy(default_custom_cnf, workdir_custom_cnf) + if wsrep_extra == 'encryption' and default_encryption_conf: + shutil.copy(encryption_cnf, workdir_encryption_cnf) + for i in range(1, self.__number_of_nodes + 1): + cnf = node_conf(i) + shutil.copy(default_pxc_cnf, cnf) + cnf_name = open(cnf, 'a+') + if self.__debug == 'YES': cnf_name.write('wsrep-debug=1\n') cnf_name.write('wsrep_cluster_address=gcomm://' + addr_list + '\n') # Calling version check method to compare the version to @@ -77,40 +153,47 @@ def create_config(self, wsrep_extra, wsrep_provider_option=None): if wsrep_extra == "ssl" or wsrep_extra == "encryption": cnf_name.write("wsrep_provider_options='gmcast.listen_addr=tcp://127.0.0.1:" + str(port_list[i - 1] + 8) + ';' + wsrep_provider_option + 'socket.ssl_key=' - + self.workdir + '/cert/server-key.pem;socket.ssl_cert=' - + self.workdir + '/cert/server-cert.pem;socket.ssl_ca=' - + self.workdir + "/cert/ca.pem'\n") - cnf_name.write('!include ' + self.workdir + '/conf/ssl.cnf\n') - sanity.create_ssl_certificate(self.workdir) + + workdir + '/cert/server-key.pem;socket.ssl_cert=' + + workdir + '/cert/server-cert.pem;socket.ssl_ca=' + + workdir + 
"/cert/ca.pem'\n") + cnf_name.write('!include ' + workdir + '/conf/ssl.cnf\n') + sanity.create_ssl_certificate(workdir) else: cnf_name.write("wsrep_provider_options='gmcast.listen_addr=tcp://127.0.0.1:" + str(port_list[i - 1] + 8) + ';' + wsrep_provider_option + "'\n") - cnf_name.write('socket = ' + self.workdir + '/node' + str(i) + '/mysql.sock\n') + cnf_name.write('socket=' + node_socket(i) + '\n') cnf_name.write('server_id=' + str(10 + i) + '\n') - cnf_name.write('!include ' + self.workdir + '/conf/custom.cnf\n') - if wsrep_extra == 'encryption': - shutil.copy(self.scriptdir + '/conf/encryption.cnf', self.workdir + '/conf/encryption.cnf') - cnf_name.write('!include ' + self.workdir + '/conf/encryption.cnf\n') - cnf_name.write('pxc_encrypt_cluster_traffic = ON\n') - else: - if int(version) > int("050700"): + cnf_name.write('!include ' + workdir_custom_cnf + '\n') + if default_encryption_conf: + if wsrep_extra == 'encryption': + cnf_name.write('!include ' + workdir_encryption_cnf + '\n') + cnf_name.write('pxc_encrypt_cluster_traffic = ON\n') + elif int(version) > int("050700"): cnf_name.write('pxc_encrypt_cluster_traffic = OFF\n') + if set_admin_address: + cnf_name.write('admin_address=127.0.0.1\n') + cnf_name.write('admin_port=' + str(33062 + i) + '\n') cnf_name.close() - return 0 + if custom_conf_settings is not None: + add_conf(custom_conf_settings) + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "Configuration file creation") def add_myextra_configuration(self, config_file): """ Adding extra configurations based on the testcase """ + result = 0 if not os.path.isfile(config_file): print('Custom config ' + config_file + ' is missing') - return 1 + result = 1 config_file = config_file - cnf_name = open(self.workdir + '/conf/custom.cnf', 'a+') + cnf_name = open(workdir_custom_cnf, 'a+') cnf_name.write('\n') cnf_name.write('!include ' + config_file + '\n') cnf_name.close() - return 0 + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(result, "PXC: Adding custom configuration") def initialize_cluster(self, init_extra=None): """ Method to initialize the cluster database @@ -124,72 +207,265 @@ def initialize_cluster(self, init_extra=None): # This is for encryption testing. 
Encryption features are not fully supported # if wsrep_extra == "encryption": # init_opt = '--innodb_undo_tablespaces=2 ' - for i in range(1, self.node + 1): - if os.path.exists(self.workdir + '/node' + str(i)): - os.system('rm -rf ' + self.workdir + '/node' + str(i) + '>/dev/null 2>&1') - if not os.path.isfile(self.workdir + '/conf/node' + str(i) + '.cnf'): - print('Could not find config file /conf/node' + str(i) + '.cnf') + for i in range(1, self.__number_of_nodes + 1): + conf = node_conf(i) + datadir = node_datadir(i) + initialize_log = init_log(i) + if os.path.exists(datadir): + os.system('rm -rf ' + datadir + '>/dev/null 2>&1') + if not os.path.isfile(conf): + print('Could not find config file ' + conf) exit(1) - version = sanity.version_check(self.basedir) + version = Utility.version_check(base_dir) if int(version) < int("050700"): - os.mkdir(self.workdir + '/node' + str(i)) - initialize_node = self.basedir + '/scripts/mysql_install_db --no-defaults ' \ - '--basedir=' + self.basedir + ' --datadir=' + \ - self.workdir + '/node' + str(i) + ' > ' + \ - self.workdir + '/log/startup' + str(i) + '.log 2>&1' + os.mkdir(datadir) + initialize_node = (base_dir + '/scripts/mysql_install_db --no-defaults --basedir=' + base_dir + + ' --datadir=' + datadir + ' > ' + initialize_log + ' 2>&1') else: - initialize_node = self.basedir + '/bin/mysqld --no-defaults ' \ - ' --initialize-insecure ' + init_extra + ' --basedir=' + self.basedir + \ - ' --datadir=' + self.workdir + '/node' + str(i) + ' > ' + \ - self.workdir + '/log/startup' + str(i) + '.log 2>&1' - if self.debug == 'YES': + initialize_node = (base_dir + '/bin/mysqld --no-defaults --initialize-insecure ' + init_extra + + ' --basedir=' + base_dir + ' --datadir=' + datadir + ' > ' + initialize_log + + ' 2>&1') + if self.__debug == 'YES': print(initialize_node) run_query = subprocess.call(initialize_node, shell=True, stderr=subprocess.DEVNULL) result = ("{}".format(run_query)) - return int(result) + utility_cmd = utility.Utility(self.__debug) + utility_cmd.check_testcase(int(result), "Initializing cluster") - def start_cluster(self, my_extra=None): + def start_cluster(self, my_extra=None, terminate_on_startup_failure: bool = True): """ Method to start the cluster nodes. This method will also check the startup status. 
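+            On success the started nodes are returned as a
+            list of DbConnection objects; when a node fails to
+            start and terminate_on_startup_failure is False,
+            an empty list is returned instead.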
""" - ping_status = "" + result = 1 if my_extra is None: my_extra = '' - for i in range(1, self.node + 1): + pxc_nodes = [] + for i in range(1, self.__number_of_nodes + 1): + socket = node_socket(i) + conf = node_conf(i) + err_log = node_err_log(i) + mysqld = base_dir + '/bin/mysqld' + datadir = node_datadir(i) + startup_script = node_startup_script(i) + + startup = (mysqld + ' --defaults-file=' + conf + + ' --datadir=' + datadir + + ' --basedir=' + base_dir + ' ' + my_extra + + ' --wsrep-provider=' + base_dir + '/lib/libgalera_smm.so') if i == 1: - startup = self.basedir + '/bin/mysqld --defaults-file=' + self.workdir + '/conf/node' + str(i) + \ - '.cnf --datadir=' + self.workdir + '/node' + str(i) + \ - ' --basedir=' + self.basedir + ' ' + my_extra + \ - ' --wsrep-provider=' + self.basedir + \ - '/lib/libgalera_smm.so --wsrep-new-cluster --log-error=' + self.workdir + \ - '/log/node' + str(i) + '.err > ' + self.workdir + '/log/node' + str(i) + '.err 2>&1 &' - else: - startup = self.basedir + '/bin/mysqld --defaults-file=' + self.workdir + '/conf/node' + str(i) + \ - '.cnf --datadir=' + self.workdir + '/node' + str(i) + \ - ' --basedir=' + self.basedir + ' ' + my_extra + \ - ' --wsrep-provider=' + self.basedir + \ - '/lib/libgalera_smm.so --log-error=' + self.workdir + '/log/node' + str(i) + '.err > ' \ - + self.workdir + '/log/node' + str(i) + '.err 2>&1 &' - save_startup = 'echo "' + startup + '" > ' + self.workdir + \ - '/log/startup' + str(i) + '.sh' + startup = startup + ' --wsrep-new-cluster' + startup = startup + ' --log-error=' + err_log + ' > ' + err_log + ' 2>&1 &' + + save_startup = 'echo "' + startup + '" > ' + startup_script os.system(save_startup) - if self.debug == 'YES': + if self.__debug == 'YES': print(startup) subprocess.call(startup, shell=True, stderr=subprocess.DEVNULL) - ping_query = self.basedir + '/bin/mysqladmin --user=root --socket=' + self.workdir + \ - '/node' + str(i) + '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(120): - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - query = self.basedir + '/bin/mysql --user=root ' \ - '--socket=' + self.workdir + '/node' + str(i) + '/mysql.sock -Bse"' \ - "delete from mysql.user where user='';" \ - '" > /dev/null 2>&1' - if self.debug == 'YES': - print(query) - os.system(query) - break # break the loop if mysqld is running - time.sleep(1) - - return int(ping_status) + utility_cmd = utility.Utility(self.__debug) + node = db_connection.DbConnection(user=user, socket=socket, node_num=i, data_dir=datadir, conf_file=conf, + err_log=err_log, base_dir=base_dir, startup_script=startup_script, + debug=self.__debug) + result = utility_cmd.startup_check(node, terminate_on_startup_failure) + pxc_nodes.append(node) + if result != 0: + return [] + return pxc_nodes + + @staticmethod + def join_new_node(donor: DbConnection, joiner_node_number: int, basedir: str = base_dir, debug: str = 'NO'): + joiner_node_cnf = node_conf(joiner_node_number) + startup_script = node_startup_script(joiner_node_number) + joiner_data_dir = node_datadir(joiner_node_number) + donor_node_num = donor.get_node_number() + shutil.copy(donor.get_conf_file(), joiner_node_cnf) + wsrep_cluster_addr = donor.execute_get_row("show variables like 'wsrep_cluster_address'")[1] + port_no = donor.get_port() + + port_no = int(port_no) + 100 + wsrep_port_no = int(port_no) + 8 + os.system("sed -i 's#node" + str(donor_node_num) + "#node" + 
str(joiner_node_number) + "#g' " + joiner_node_cnf) + os.system("sed -i '/wsrep_sst_auth=root:/d' " + joiner_node_cnf) + os.system("sed -i '0,/^[ \\t]*wsrep_cluster_address[ \\t]*=.*$/s|" + "^[ \\t]*wsrep_cluster_address[ \\t]*=.*$|wsrep_cluster_address=" + + wsrep_cluster_addr + "127.0.0.1:" + str(wsrep_port_no) + "|' " + + joiner_node_cnf) + os.system("sed -i '0,/^[ \\t]*port[ \\t]*=.*$/s|" + "^[ \\t]*port[ \\t]*=.*$|port=" + + str(port_no) + "|' " + joiner_node_cnf) + os.system('sed -i "0,/^[ \\t]*wsrep_provider_options[ \\t]*=.*$/s|' + "^[ \\t]*wsrep_provider_options[ \\t]*=.*$|wsrep_provider_options=" + "'gmcast.listen_addr=tcp://127.0.0.1:" + str(wsrep_port_no) + + "'|\" " + joiner_node_cnf) + os.system("sed -i '0,/^[ \\t]*server_id[ \\t]*=.*$/s|" + "^[ \\t]*server_id[ \\t]*=.*$|server_id=" + "14|' " + joiner_node_cnf) + + create_upgrade_startup = ( + 'sed "s#' + lower_base_dir + '#' + basedir + '#g" ' + node_startup_script(donor_node_num) + + ' > ' + startup_script) + if debug == 'YES': + print(create_upgrade_startup) + os.system(create_upgrade_startup) + os.system("sed -i 's#node" + str(donor_node_num) + + "#node" + str(joiner_node_number) + "#g' " + startup_script) + os.system("rm -rf " + joiner_data_dir) + os.mkdir(joiner_data_dir) + + time.sleep(10) + joiner = db_connection.DbConnection(user=user, socket=node_socket(joiner_node_number), + node_num=joiner_node_number, data_dir=joiner_data_dir, + conf_file=joiner_node_cnf, err_log=node_err_log(joiner_node_number), + base_dir=base_dir, startup_script=startup_script, debug=debug) + utility_cmd = utility.Utility(debug) + utility_cmd.restart_cluster_node(joiner) + utility_cmd.startup_check(joiner) + utility_cmd.wait_for_wsrep_status(joiner) + + return joiner + + @staticmethod + def join_new_upgraded_node(donor: DbConnection, joiner_node_number: int, debug: str = 'NO'): + return StartCluster.join_new_node(donor, joiner_node_number, higher_version_basedir, debug) + + @staticmethod + def upgrade_pxc_node(node: DbConnection, debug, node_to_add_load: DbConnection = None, config_replace: dict = None, + node_sync_timeout: int = DEFAULT_SERVER_UP_TIMEOUT): + node.shutdown() + time.sleep(60) + + if node_to_add_load is not None: + sysbench = sysbench_run.SysbenchRun(node_to_add_load, debug) + sysbench.sanity_check('test_one') + sysbench.sanity_check('test_two') + sysbench.sanity_check('test_three') + sysbench.test_sysbench_load('test_one', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_LOAD_TEST_TABLE_SIZE) + sysbench.test_sysbench_load('test_two', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_LOAD_TEST_TABLE_SIZE) + sysbench.test_sysbench_load('test_three', SYSBENCH_TABLE_COUNT, SYSBENCH_THREADS, + SYSBENCH_LOAD_TEST_TABLE_SIZE) + + utility_cmd = utility.Utility(debug) + version = utility_cmd.version_check(PXC_UPPER_BASE) + if int(version) > int("080000"): + os.system("sed -i '/wsrep_sst_auth=root:/d' " + node.get_conf_file()) + if config_replace is not None: + for cnf in config_replace: + os.system("sed -i 's#" + cnf + "=.*#" + cnf + "=" + config_replace[cnf] + "#g' " + + node.get_conf_file()) + startup_cmd = (higher_version_basedir + '/bin/mysqld --defaults-file=' + node.get_conf_file() + + ' --wsrep-provider=' + higher_version_basedir + '/lib/libgalera_smm.so --datadir=' + + node.get_data_dir() + ' --basedir=' + higher_version_basedir + ' --log-error=' + + node.get_error_log() + ' >> ' + node.get_error_log() + ' 2>&1 &') + utility_cmd.check_testcase(0, "Starting cluster node with upgraded version") + else: + startup_cmd = 
(higher_version_basedir + '/bin/mysqld --defaults-file=' + node.get_conf_file() +
+                           ' --datadir=' + node.get_data_dir() + ' --basedir=' + higher_version_basedir +
+                           ' --wsrep-provider=none --log-error=' + node.get_error_log() +
+                           ' >> ' + node.get_error_log() + ' 2>&1 &')
+        if debug == 'YES':
+            print(startup_cmd)
+        os.system(startup_cmd)
+        utility_cmd.startup_check(node)
+        utility_cmd.wait_for_wsrep_status(node, node_sync_timeout)
+        if int(version) < int("080000"):
+            upgrade_cmd = (higher_version_basedir + '/bin/mysql_upgrade -uroot --socket=' + node.get_socket() + ' > ' +
+                           node.get_error_log() + ' 2>&1')
+            if debug == 'YES':
+                print(upgrade_cmd)
+            result = os.system(upgrade_cmd)
+            utility_cmd.check_testcase(result, "Cluster node" + str(node.get_node_number()) + " upgrade is successful")
+            node.shutdown()
+            time.sleep(30)
+            utility_cmd.check_testcase(0, "Shutdown cluster node" + str(node.get_node_number()) + " after upgrade run")
+            create_startup = ('sed -i "s#' + lower_base_dir + '#' + higher_version_basedir + '#g" ' +
+                              node.get_startup_script())
+            if debug == 'YES':
+                print(create_startup)
+            os.system(create_startup)
+            if int(node.get_node_number()) == 1:
+                remove_bootstrap_option = 'sed -i "s#--wsrep-new-cluster##g" ' + node.get_startup_script()
+                if debug == 'YES':
+                    print(remove_bootstrap_option)
+                os.system(remove_bootstrap_option)
+            time.sleep(5)
+
+            upgrade_startup = "bash " + node.get_startup_script()
+            if debug == 'YES':
+                print(upgrade_startup)
+            result = os.system(upgrade_startup)
+            utility_cmd.check_testcase(result,
+                                       "Starting cluster node" + str(node.get_node_number()) + " after upgrade run")
+            utility_cmd.startup_check(node)
+            utility_cmd.wait_for_wsrep_status(node)
+
+    @staticmethod
+    def pxb_sanity_check(node: DbConnection, version: str):
+        """ This method will check the pxb installation and
+            clean up the backup directory
+        """
+        # Check xtrabackup installation
+        if find_executable('xtrabackup') is None:
+            print('\tERROR! Percona Xtrabackup is not installed.')
+            exit(1)
+
+        # Recreate backup directory
+        if os.path.exists(backup_dir):
+            shutil.rmtree(backup_dir)
+        os.mkdir(backup_dir)
+
+        # Check PXC version and create XB user with mysql_native_password plugin.
+        if int(version) < int("050700"):
+            queries = ["create user 'xbuser'@'localhost' identified by 'test'",
+                       "grant all on *.* to 'xbuser'@'localhost'"]
+        else:
+            queries = ["create user 'xbuser'@'localhost' identified with mysql_native_password by 'test'",
+                       "grant all on *.* to 'xbuser'@'localhost'"]
+        node.execute_queries(queries)
+
+    @staticmethod
+    def pxb_backup(node: DbConnection, encryption: str, copy_back_to_ps_node: bool = False, debug: str = 'NO'):
+        """ This method will back up the PXC/PS data directory
+            with the help of xtrabackup.
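+            Returns the path of the populated backup directory.
+            A minimal usage sketch (hedged; the version string
+            and argument values are illustrative only):
+
+                StartCluster.pxb_sanity_check(node, '080033')
+                backup = StartCluster.pxb_backup(node, encryption='NO')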
+ """ + # Enable keyring file plugin if it is encryption run + if encryption == 'YES': + backup_extra = " --keyring-file-data=" + node.get_data_dir() + \ + "/keyring --early-plugin-load='keyring_file=keyring_file.so'" + else: + backup_extra = '' + + # Backup data using xtrabackup + backup_cmd = ("xtrabackup --user=xbuser --password='test' --backup --target-dir=" + backup_dir + + " -S" + node.get_socket() + " --datadir=" + node.get_data_dir() + " " + + backup_extra + " --lock-ddl >" + workdir + "/log/xb_backup.log 2>&1") + if debug == 'YES': + print(backup_cmd) + os.system(backup_cmd) + + # Prepare backup for node startup + prepare_backup = ("xtrabackup --prepare --target_dir=" + backup_dir + ' ' + backup_extra + + " --lock-ddl >" + workdir + "/log/xb_backup_prepare.log 2>&1") + if debug == 'YES': + print(prepare_backup) + os.system(prepare_backup) + + # copy backup directory to destination + if copy_back_to_ps_node: + dest_datadir = workdir + '/psnode1' + if os.path.exists(dest_datadir): + shutil.rmtree(dest_datadir) + copy_backup = ("xtrabackup --copy-back --target-dir=" + backup_dir + " --datadir=" + dest_datadir + + " " + backup_extra + " --lock-ddl >" + workdir + "/log/copy_backup.log 2>&1") + if debug == 'YES': + print(copy_backup) + os.system(copy_backup) + + # Copy keyring file to destination directory for encryption startup + if encryption == 'YES': + os.system("cp " + node.get_data_dir() + "/keyring " + dest_datadir) + + if debug == 'YES': + print("Backup dir path: ", backup_dir) + + return backup_dir diff --git a/util/pxc_util.py b/util/pxc_util.py new file mode 100644 index 0000000..97bdd0e --- /dev/null +++ b/util/pxc_util.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +import os +import sys +import argparse + +from util.pxc_startup import StartCluster + +cwd = os.path.dirname(os.path.realpath(__file__)) +parent_dir = os.path.normpath(os.path.join(cwd, '../../')) +sys.path.insert(0, parent_dir) +from base_test import BaseTest +from config import * +from util import utility, db_connection + +# Read argument +parser = argparse.ArgumentParser(prog='PXC Utility', usage='%(prog)s [options]') +parser.add_argument('-e', '--encryption-run', action='store_true', + help='This option will enable encryption options') +parser.add_argument('--start', action='store_true', + help='Start PXC nodes') +parser.add_argument('--stop', action='store_true', + help='Stop PXC nodes') +parser.add_argument('-d', '--debug', action='store_true', + help='This option will enable debug logging') +args = parser.parse_args() +if args.encryption_run is True: + encryption = 'YES' +else: + encryption = 'NO' +if args.debug is True: + debug = 'YES' +else: + debug = 'NO' + +utility_cmd = utility.Utility(debug) +utility_cmd.check_python_version() + + +class PXCUtil(BaseTest): + def __init__(self): + super().__init__(my_extra='--max-connections=1500') + + +pxc_util = PXCUtil() +if args.start is True: + # Start Cluster + pxc_util.start_pxc() + for node in pxc_util.pxc_nodes: + # Print connection string + print('\t' + BASEDIR + '/bin/mysql --user=root --socket=' + node.get_socket()) + utility_cmd.check_testcase(0, "PXC connection string") + +if args.stop is True: + StartCluster.kill_mysqld() diff --git a/util/rqg_datagen.py b/util/rqg_datagen.py index e5659d0..41da52b 100644 --- a/util/rqg_datagen.py +++ b/util/rqg_datagen.py @@ -1,64 +1,59 @@ import os import configparser from util import utility - +from util.db_connection import DbConnection # Reading initial configuration config = configparser.ConfigParser() script_dir = 
os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(script_dir, '../')) -config.read(parent_dir + '/config.ini') -workdir = config['config']['workdir'] -basedir = config['config']['basedir'] -user = config['config']['user'] +rand_gen_dir = parent_dir + '/randgen' +gen_data_pl = rand_gen_dir + '/gendata.pl' class RQGDataGen: - def __init__(self, basedir, workdir, user, debug): - self.basedir = basedir - self.workdir = workdir - self.user = user + def __init__(self, node: DbConnection, debug): + self.__node = node self.debug = debug self.utility_cmd = utility.Utility(debug) - self.version = self.utility_cmd.version_check(self.basedir) + self.version = self.utility_cmd.version_check(node.get_base_dir()) - def initiate_rqg(self, module, db, socket): + def initiate_rqg(self, module, db, work_dir): """ Method to initiate RQD data load against Percona XtraDB cluster. """ + port = self.__node.execute_get_value("select @@port") + queries = ['drop database if exists ' + db, 'create database ' + db] + self.__node.execute_queries(queries) # Get RQG module - module = parent_dir + '/randgen/conf/' + module - master_port = self.basedir + "/bin/mysql --user=root --socket=" + socket + \ - ' -Bse"select @@port" 2>&1' - port = os.popen(master_port).read().rstrip() + module = rand_gen_dir + '/conf/' + module # Create schema for RQG run - create_db = self.basedir + "/bin/mysql --user=root --socket=" + socket + \ - ' -Bse"drop database if exists ' + db + \ - ';create database ' + db + ';" 2>&1' - os.system(create_db) + if int(self.version) > int("050700"): - create_user = self.basedir + "/bin/mysql --user=root --socket=" + socket + \ - ' -Bse" drop user if exists \'rqg_test\'@\'%\'; FLUSH PRIVILEGES; ' \ - 'create user rqg_test@\'%\' identified with mysql_native_password by \'\'; ' \ - 'grant all on *.* to rqg_test@\'%\';" 2>&1' - os.system(create_user) + queries = ["drop user if exists 'rqg_test'@'%'", + "create user rqg_test@'%' identified with mysql_native_password by ''", + "grant all on *.* to rqg_test@'%'"] + self.__node.execute_queries(queries) + # Checking RQG module - os.chdir(parent_dir + '/randgen') + os.chdir(rand_gen_dir) if not os.path.exists(module): print(module + ' does not exist in RQG') exit(1) # Run RQG for file in os.listdir(module): if file.endswith(".zz"): - rqg_command = "perl " + parent_dir + "/randgen/gendata.pl " \ - "--dsn=dbi:mysql:host=127.0.0.1:port=" \ - + port + ":user=" + self.user + ":database=" + db + " --spec=" + \ + rqg_command = "perl " + gen_data_pl + \ + " --dsn=dbi:mysql:host=127.0.0.1:port=" \ + + str(port) + ":user=" + self.__node.get_user() + ":database=" + db + " --spec=" + \ module + '/' + file + " > " + \ - self.workdir + "/log/rqg_run.log 2>&1" + work_dir + "/log/rqg_run.log 2>&1" + if self.debug == 'YES': + print(rqg_command) result = os.system(rqg_command) self.utility_cmd.check_testcase(result, "RQG data load (DB: " + db + ")") - def pxc_dataload(self, socket): + def pxc_dataload(self, work_dir): """ RQG data load for PXC Server """ @@ -66,5 +61,5 @@ def pxc_dataload(self, socket): rqg_config = ['galera', 'transactions', 'gis', 'runtime', 'temporal'] else: rqg_config = ['galera', 'transactions', 'partitioning', 'gis', 'runtime', 'temporal'] - for config in rqg_config: - self.initiate_rqg(config, 'db_' + config, socket) + for conf in rqg_config: + self.initiate_rqg(conf, 'db_' + conf, work_dir) diff --git a/util/sanity.py b/util/sanity.py index 08b2781..efe7d4d 100644 --- a/util/sanity.py +++ b/util/sanity.py @@ -1,28 +1,7 @@ 
#!/usr/bin/env python3 import os -import sys import subprocess import shutil -from datetime import datetime - - -def check_python_version(): - """ Check python version. Raise error if the - version is 3.5 or greater - """ - if sys.version_info < (3, 5): - print("\nError! You should use python 3.7 or greater\n") - exit(1) - - -def version_check(basedir): - # Get database version number - version_info = os.popen(basedir + "/bin/mysqld --version 2>&1 " - "| grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1").read() - version = "{:02d}{:02d}{:02d}".format(int(version_info.split('.')[0]), - int(version_info.split('.')[1]), - int(version_info.split('.')[2])) - return version def create_ssl_certificate(workdir): diff --git a/util/sysbench_run.py b/util/sysbench_run.py index c869b9c..2fc17d5 100644 --- a/util/sysbench_run.py +++ b/util/sysbench_run.py @@ -2,361 +2,341 @@ import itertools import sys from config import * +from util.db_connection import DbConnection + cwd = os.path.dirname(os.path.realpath(__file__)) parent_dir = os.path.normpath(os.path.join(cwd, '../')) sys.path.insert(0, parent_dir) from util import utility + SYSBENCH_DB_CONNECT = " --mysql-user=" + SYSBENCH_USER + \ - " --mysql-password=" + SYSBENCH_PASS + " --db-driver=mysql " + " --mysql-password=" + SYSBENCH_PASS + " --db-driver=mysql " EXPORT_LUA_PATH = 'export SBTEST_SCRIPTDIR="' + parent_dir + \ - '/sysbench_lua"; export LUA_PATH="' + parent_dir + \ - '/sysbench_lua/?;' + parent_dir + '/sysbench_lua/?.lua"' + '/sysbench_lua"; export LUA_PATH="' + parent_dir + \ + '/sysbench_lua/?;' + parent_dir + '/sysbench_lua/?.lua"' + +lua_dir = parent_dir + "/sysbench_lua/" + class SysbenchRun: - def __init__(self, basedir, workdir, socket, debug): - self.basedir = basedir - self.workdir = workdir - self.socket = socket - self.debug = debug - self.utility_cmd = utility.Utility(debug) + def __init__(self, node: DbConnection, debug): + self.__node = node + self.__debug = debug + self.__utility_cmd = utility.Utility(debug) + self.__log_dir = WORKDIR + "/log/" def sanity_check(self, db): # Sanity check for sysbench run check_sybench = os.system('which sysbench >/dev/null 2>&1') if check_sybench != 0: print("ERROR!: sysbench package is not installed") - # Create schema for sysbench run - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"drop database if exists ' + \ - db + "; create database " + \ - db + ';" > /dev/null 2>&1' - if self.debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not create sysbench test database(" + db + ")") - exit(1) - version = self.utility_cmd.version_check(self.basedir) # Get version + + queries = ["drop database if exists " + db, "create database " + db] + self.__node.execute_queries(queries) + + version = self.__utility_cmd.version_check(self.__node.get_base_dir()) # Get version + create_user_query = ("create user if not exists " + SYSBENCH_USER + "@'localhost' identified by '" + + SYSBENCH_PASS + "'") + grant_query = "grant all on *.* to " + SYSBENCH_USER + "@'localhost'" + # Create sysbench user if int(version) < int("050700"): - create_user = self.basedir + "/bin/mysql --user=root " \ - "--socket=" + self.socket + ' -e"grant all on *.* to ' + \ - SYSBENCH_USER + "@'localhost' identified by '" + SYSBENCH_PASS + "'" \ - ';" > /dev/null 2>&1' + grant_query = grant_query + " identified by '" + SYSBENCH_PASS + "'" + self.__node.execute(grant_query) else: - create_user = self.basedir + "/bin/mysql --user=root --socket=" + \ 
- self.socket + ' -e"create user if not exists ' + \ - SYSBENCH_USER + "@'localhost' identified with mysql_native_password by '" + \ - SYSBENCH_PASS + "';grant all on *.* to " + SYSBENCH_USER + "@'localhost'" \ - ';" > /dev/null 2>&1' - if self.debug == 'YES': - print(create_user) - query_status = os.system(create_user) - if int(query_status) != 0: - print("ERROR!: Could not create sysbench user : sysbench") - return 1 + self.__node.execute_queries([create_user_query, grant_query]) return 0 + def test_sanity_check(self, db): + result = self.sanity_check(db) + self.__utility_cmd.check_testcase(result, "Sysbench run sanity check") + + def get_params(self, lua_script, table_size, tables, threads, db, log_name): + return {'lua': lua_dir + lua_script, + 'table-size': str(table_size), + 'tables': str(tables), + 'threads': str(threads), + 'db': db, + 'socket': self.__node.get_socket(), + 'log-file': self.__log_dir + log_name, + 'user': SYSBENCH_USER, + 'password': SYSBENCH_PASS} + def sysbench_load(self, db, tables, threads, table_size): - # Sysbench data load - query = EXPORT_LUA_PATH + ";sysbench " + parent_dir + \ - "/sysbench_lua/oltp_insert.lua" \ - " --table-size=" + str(table_size) + \ - " --tables=" + str(tables) + \ - " --threads=" + str(threads) + \ - " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \ - " --mysql-socket=" + self.socket + " prepare >" + \ - self.workdir + "/log/sysbench_prepare.log" - if self.debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: sysbench data load run is failed") - return 1 - return 0 + params = self.get_params("oltp_insert.lua", table_size, tables, threads, + db, "sysbench_prepare.log") + + query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} " + "--mysql-user={user} --mysql-password={password} --db-driver=mysql " + "--mysql-socket={socket} prepare > {log-file}").format(**params) + + return self.execute_sysbench_query(query) + + def test_sysbench_load(self, db, tables=SYSBENCH_TABLE_COUNT, threads=SYSBENCH_THREADS, + table_size=SYSBENCH_NORMAL_TABLE_SIZE, use_load_table_size: bool = False): + if use_load_table_size: + table_size = SYSBENCH_LOAD_TEST_TABLE_SIZE + result = self.sysbench_load(db, tables, threads, table_size) + self.__utility_cmd.check_testcase(result, "Sysbench data load with threads " + str(threads)) def sysbench_ts_encryption(self, db, threads): # Check InnoDB system tablespace encryption - check_system_ts_encryption = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -Bse"select encryption from information_schema.innodb_tablespaces ' \ - "where name='innodb_system';\" 2>&1" - if self.debug == 'YES': - print(check_system_ts_encryption) - check_system_ts_encryption = os.popen(check_system_ts_encryption).read().rstrip() - + system_ts_encryption_query = ("select encryption from information_schema.innodb_tablespaces where " + "name='innodb_system'") + if self.__debug == 'YES': + print(system_ts_encryption_query) + check_system_ts_encryption = self.__node.execute_get_value(system_ts_encryption_query) + check_table_encryption_query = "select @@default_table_encryption" + if self.__debug == 'YES': + print(check_table_encryption_query) # Check default_table_encryption status - check_table_encryption = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -Bse"select @@default_table_encryption;" 2>&1' - if self.debug == 'YES': - print(check_table_encryption) - check_table_encryption = 
os.popen(check_table_encryption).read().rstrip() + check_table_encryption = self.__node.execute_get_value(check_table_encryption_query) for i in range(1, int(threads) - 4): - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"CREATE TABLESPACE ts' + \ - str(i) + " ADD DATAFILE 'ts" + str(i) + ".ibd' encryption='Y';\" > /dev/null 2>&1" - if self.debug == 'YES': - print("CREATE TABLESPACE ts" + str(i) + " ADD DATAFILE 'ts" + str(i) + ".ibd' encryption='Y';") - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not create tablespace ts" + str(i)) - exit(1) - if check_table_encryption == 'ON': - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"ALTER TABLE ' + db + '.sbtest' + \ - str(i) + ' tablespace ts' + str(i) + ' ;" > /dev/null 2>&1' - if self.debug == 'YES': - print("ALTER TABLE " + db + '.sbtest' + str(i) + ' tablespace ts' + str(i) + ';') - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not alter table sbtest" + str(i)) - exit(1) + query = "CREATE TABLESPACE ts" + str(i) + " ADD DATAFILE 'ts" + str(i) + ".ibd' encryption='Y'" + self.__node.execute(query) + if check_table_encryption == 'ON' or check_table_encryption == '1': + query = "ALTER TABLE " + db + ".sbtest" + str(i) + " tablespace ts" + str(i) + self.__node.execute(query) + if check_system_ts_encryption == 'Y': - if check_table_encryption == 'OFF': - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"ALTER TABLE ' + db + '.sbtest' + \ - str(i+5) + " encryption='Y' ;\" > /dev/null 2>&1" - if self.debug == 'YES': - print("ALTER TABLE " + db + '.sbtest' + str(i+5) + "encryption='Y';" ) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not alter table sbtest" + str(i)) - exit(1) - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"ALTER TABLE ' + db + '.sbtest' + \ - str(i + 5) + ' tablespace=innodb_system;" > /dev/null 2>&1' - if self.debug == 'YES': - print("ALTER TABLE " + db + '.sbtest' + str(i + 5) + ' tablespace=innodb_system;') - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not alter table sbtest" + str(i)) - exit(1) - else: - if check_table_encryption == 'ON': - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"ALTER TABLE ' + db + '.sbtest' + \ - str(i + 5) + ' tablespace=innodb_system;" > /dev/null 2>&1' - if self.debug == 'YES': - print("ALTER TABLE " + db + '.sbtest' + str(i + 5) + ' tablespace=innodb_system;') - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: Could not alter table sbtest" + str(i)) - exit(1) + if check_table_encryption == 'OFF' or check_table_encryption == '0': + query = 'ALTER TABLE ' + db + '.sbtest' + str(i + 5) + " encryption='Y'" + self.__node.execute(query) + + query = 'ALTER TABLE ' + db + '.sbtest' + str(i + 5) + ' tablespace=innodb_system' + self.__node.execute(query) return 0 - def sysbench_custom_oltp_load(self, db, table_count, thread, table_size): + def sysbench_custom_oltp_load(self, db, table_count, threads, table_size=SYSBENCH_OLTP_TEST_TABLE_SIZE): # Create sysbench table structure result = self.sysbench_load(db, table_count, table_count, 10000) - self.utility_cmd.check_testcase(result, "Sysbench data load") - rand_types = ['uniform', 'gaussian', 'special', 'pareto'] - delete_inserts = [10, 20, 30, 40, 50] - index_updates = [10, 20, 30, 40, 50] - 
non_index_updates = [10, 20, 30, 40, 50]
+        self.__utility_cmd.check_testcase(result, "Sysbench data load")
+
+        params = self.get_params('oltp_read_write.lua', table_size, table_count,
+                                 threads, db, "sysbench_oltp_read_write.log")
+        params['time'] = str(10)
+
+        rand_types = ['uniform', 'pareto']  # 'gaussian', 'special'
+        delete_inserts = [10, 50]  # 20, 30, 40
+        index_updates = [10, 50]  # 20, 30, 40
+        non_index_updates = [10, 50]  # 20, 30, 40
         for rand_type, delete_insert, index_update, non_index_update in \
                 itertools.product(rand_types, delete_inserts, index_updates, non_index_updates):
-            query = EXPORT_LUA_PATH + ";sysbench " + parent_dir + \
-                    "/sysbench_lua/oltp_read_write.lua" \
-                    " --table-size=" + str(table_size) + \
-                    " --tables=" + str(table_count) + \
-                    " --threads=" + str(thread) + \
-                    " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \
-                    " --mysql-socket=" + self.socket + \
-                    " --rand_type=" + rand_type + \
-                    " --db-ps-mode=disable --delete_inserts=" + str(delete_insert) + \
-                    " --index_updates=" + str(index_update) + \
-                    " --time=" + str(10) + \
-                    " --non_index_updates=" + str(non_index_update) + " run >" + \
-                    self.workdir + "/log/sysbench_oltp_read_write.log"
-            if self.debug == 'YES':
-                print(query)
-            query_status = os.system(query)
+            params['rand_type'] = rand_type
+            params['index_updates'] = str(index_update)
+            params['non_index_updates'] = str(non_index_update)
+            params['delete_inserts'] = str(delete_insert)
+
+            query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} "
+                     "--mysql-user={user} --mysql-password={password} --db-driver=mysql "
+                     "--mysql-socket={socket} --rand_type={rand_type} --db-ps-mode=disable "
+                     "--delete_inserts={delete_inserts} --index_updates={index_updates} "
+                     "--time={time} --non_index_updates={non_index_updates} run > {log-file}").format(**params)
+
             combination = "rand_type:" + rand_type + \
                           ", delete_inserts:" + str(delete_insert) + \
                           ",idx_updates:" + str(index_update) + \
                           ", non_idx_updates:" + str(non_index_update)
-            if int(query_status) != 0:
-                print("ERROR!: sysbench oltp(" + combination + ") run is failed")
+
+            if self.execute_sysbench_query(query) != 0:
+                print("ERROR!: sysbench oltp(" + combination + ") run is failed")
+                exit(1)
             else:
-                self.utility_cmd.check_testcase(query_status, "Sysbench oltp(" + combination + ") run")
+                self.__utility_cmd.check_testcase(0, "Sysbench oltp(" + combination + ") run")

-    def sysbench_custom_read_qa(self, db, table_count, thread, table_size):
+    def sysbench_custom_read_qa(self, db, table_count, threads, table_size=SYSBENCH_READ_QA_TABLE_SIZE):
         # Create sysbench table structure
         result = self.sysbench_load(db, table_count, table_count, table_size)
-        self.utility_cmd.check_testcase(result, "Sysbench data load")
-        sum_ranges = [2, 4, 6]
-        distinct_ranges = [3, 5, 7]
-        simple_ranges = [1, 3, 5]
-        order_ranges = [2, 5, 8]
-        point_selects = [10, 20, 30]
+        self.__utility_cmd.check_testcase(result, "Sysbench data load")
+
+        params = self.get_params('oltp_read_only.lua', table_size, table_count, threads,
+                                 db, "sysbench_oltp_read_only.log")
+        params['time'] = str(10)
+
+        sum_ranges = [2, 6]  # 4
+        distinct_ranges = [3, 7]  # 5
+        simple_ranges = [5]  # 1, 3
+        order_ranges = [8]  # 2, 5
+        point_selects = [10, 30]  # 20
         for sum_range, distinct_range, simple_range, order_range, point_select in \
                 itertools.product(sum_ranges, distinct_ranges, simple_ranges, order_ranges, point_selects):
-            query = EXPORT_LUA_PATH + ";sysbench " + parent_dir + \
"/sysbench_lua/oltp_read_only.lua" \ - " --table-size=" + str(table_size) + \ - " --tables=" + str(table_count) + \ - " --threads=" + str(thread) + \ - " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \ - " --mysql-socket=" + self.socket + \ - " --distinct_ranges=" + str(distinct_range) + \ - " --sum_ranges=" + str(sum_range) + \ - " --simple_ranges=" + str(simple_range) + \ - " --order_ranges=" + str(order_range) + \ - " --point_selects=" + str(point_select) + \ - " --time=" + str(10) + \ - " run >" + self.workdir + "/log/sysbench_oltp_read_only.log" - if self.debug == 'YES': - print(query) - query_status = os.system(query) - combination = "distinct_rng:" + str(distinct_range) + \ - ", sum_rng:" + str(sum_range) + \ - ", simple_rng:" + str(simple_range) + \ - ", point_selects:" + str(point_select) + \ - ", order_rng:" + str(order_range) - if int(query_status) != 0: + params['distinct_ranges'] = str(distinct_range) + params['sum_ranges'] = str(sum_range) + params['simple_ranges'] = str(simple_range) + params['order_ranges'] = str(order_range) + params['point_selects'] = str(point_select) + + query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} " + "--mysql-user={user} --mysql-password={password} --db-driver=mysql " + "--mysql-socket={socket} --distinct_ranges={distinct_ranges} --sum_ranges={sum_ranges} " + "--simple_ranges={simple_ranges} --order_ranges={order_ranges} --point_selects={point_selects} " + "--time={time} run > {log-file}").format(**params) + + combination = "distinct_rng:" + params['distinct_ranges'] + \ + ", sum_rng:" + params['sum_ranges'] + \ + ", simple_rng:" + params['simple_ranges'] + \ + ", point_selects:" + params['point_selects'] + \ + ", order_rng:" + params['order_ranges'] + + if self.execute_sysbench_query(query) != 0: print("ERROR!: sysbench read only(" + combination + ") run is failed") exit(1) else: - self.utility_cmd.check_testcase(query_status, "Sysbench read only(" + combination + ") run") - - def sysbench_cleanup(self, db, tables, threads, table_size): - # Sysbench data cleanup - query = "sysbench /usr/share/sysbench/oltp_insert.lua" \ - " --table-size=" + str(table_size) + \ - " --tables=" + str(tables) + \ - " --threads=" + str(threads) + \ - " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \ - " --mysql-socket=" + self.socket + \ - " cleanup >" + self.workdir + "/log/sysbench_cleanup.log" - if self.debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: sysbench cleanup run is failed") - return 1 - return 0 + self.__utility_cmd.check_testcase(0, "Sysbench read only(" + combination + ") run") + + def sysbench_cleanup(self, db, table_count, threads, table_size): - def sysbench_oltp_read_write(self, db, tables, threads, table_size, time, background=None): - if background == "Yes": - str_run = self.workdir + "/log/sysbench_read_write_" + str(threads) + ".log & " + params = self.get_params('oltp_insert.lua', table_size, table_count, threads, + db, "sysbench_cleanup.log") + + query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} " + "--mysql-user={user} --mysql-password={password} --db-driver=mysql " + "--mysql-socket={socket} cleanup > {log-file}").format(**params) + + return self.execute_sysbench_query(query) + + def test_sysbench_cleanup(self, db, tables, threads, table_size): + result = self.sysbench_cleanup(db, tables, threads, table_size) + self.__utility_cmd.check_testcase(result, "Sysbench data 
cleanup (threads : " + str(threads) + ")") + + def sysbench_oltp_read_write(self, db, table_count, threads, table_size, time, background: bool = False, port=None): + if background: + log_file = "sysbench_read_write_" + str(threads) + ".log & " else: - str_run = self.workdir + "/log/sysbench_read_write_" + str(threads) + ".log " - # Sysbench OLTP read write run - query = "sysbench /usr/share/sysbench/oltp_read_write.lua" \ - " --table-size=" + str(table_size) + \ - " --tables=" + str(tables) + \ - " --threads=" + str(threads) + \ - " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \ - " --mysql-socket=" + self.socket + \ - " --time=" + str(time) + \ - " --db-ps-mode=disable run > " + str_run - if self.debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: sysbench read write run is failed") - return 1 - return 0 + log_file = "sysbench_read_write_" + str(threads) + ".log " + + if port is not None: + host_to_connect = " --mysql-host=127.0.0.1 --mysql-port=" + str(port) + else: + host_to_connect = " --mysql-socket=" + self.__node.get_socket() + + params = self.get_params('oltp_read_write.lua', table_size, table_count, threads, + db, log_file) + params['time'] = str(time) - def sysbench_oltp_read_only(self, db, tables, threads, table_size, time, background=None): - if background == "Yes": - str_run = self.workdir + "/log/sysbench_read_only.log & " + query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} " + "--mysql-user={user} --mysql-password={password} --db-driver=mysql " + host_to_connect + + " --time={time} --db-ps-mode=disable run > {log-file}").format(**params) + + return self.execute_sysbench_query(query) + + def test_sysbench_oltp_read_write(self, db, tables=SYSBENCH_TABLE_COUNT, threads=SYSBENCH_THREADS, + table_size=SYSBENCH_NORMAL_TABLE_SIZE, time=SYSBENCH_RUN_TIME, + background=False, port=None, is_terminate=True, use_load_table_size=False): + if use_load_table_size: + table_size = SYSBENCH_LOAD_TEST_TABLE_SIZE + result = self.sysbench_oltp_read_write(db, tables, threads, table_size, time, background, port) + self.__utility_cmd.check_testcase(result, "Initiated sysbench oltp run", is_terminate) + + def sysbench_oltp_read_only(self, db, table_count, threads, table_size, time, background: bool = False): + if background: + log_file = "sysbench_read_only.log & " else: - str_run = self.workdir + "/log/sysbench_read_only.log " + log_file = "sysbench_read_only.log " + + params = self.get_params('oltp_read_only.lua', table_size, table_count, threads, + db, log_file) + params['time'] = str(time) + # Sysbench OLTP read only run - query = "sysbench /usr/share/sysbench/oltp_read_only.lua" \ - " --table-size=" + str(table_size) + \ - " --tables=" + str(tables) + \ - " --threads=" + str(threads) + \ - " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \ - " --mysql-socket=" + self.socket + \ - " --time=" + str(time) + \ - " --db-ps-mode=disable run > " + str_run - if self.debug == 'YES': - print(query) - query_status = os.system(query) - if int(query_status) != 0: - print("ERROR!: sysbench read only run is failed") - return 1 - return 0 + query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} " + "--mysql-user={user} --mysql-password={password} --db-driver=mysql " + "--mysql-socket={socket} --time={time} --db-ps-mode=disable run > {log-file}").format(**params) + + return self.execute_sysbench_query(query) + + def 
test_sysbench_oltp_read_only(self, db, table_count, threads, table_size, time, background=False):
+        result = self.sysbench_oltp_read_only(db, table_count, threads, table_size, time, background)
+        self.__utility_cmd.check_testcase(result, "Initiated sysbench oltp read only run")

-    def sysbench_oltp_write_only(self, db, tables, threads, table_size, time, background=None):
-        if background == "Yes":
-            str_run = self.workdir + "/log/sysbench_write_only.log &"
+    def sysbench_oltp_write_only(self, db, table_count, threads, table_size, time, background: bool = False):
+        if background:
+            log_file = "sysbench_write_only.log &"
         else:
-            str_run = self.workdir + "/log/sysbench_write_only.log"
-        # Sysbench OLTP write only run
-        query = "sysbench /usr/share/sysbench/oltp_write_only.lua" \
-                " --table-size=" + str(table_size) + \
-                " --tables=" + str(tables) + \
-                " --threads=" + str(threads) + \
-                " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \
-                " --mysql-socket=" + self.socket + \
-                " --time=" + str(time) + \
-                " --db-ps-mode=disable run > " + str_run
-        if self.debug == 'YES':
-            print(query)
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR!: sysbench write only run is failed")
-            return 1
-        return 0
+            log_file = "sysbench_write_only.log"
+
+        params = self.get_params('oltp_write_only.lua', table_size, table_count, threads,
+                                 db, log_file)
+        params['time'] = str(time)
+
+        # Sysbench OLTP write only run
+        query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} "
+                 "--mysql-user={user} --mysql-password={password} --db-driver=mysql "
+                 "--mysql-socket={socket} --time={time} --db-ps-mode=disable run > {log-file}").format(**params)
+
+        return self.execute_sysbench_query(query)

     def sysbench_custom_table(self, db, table_count, thread, table_size):
         table_format = ['DEFAULT', 'DYNAMIC', 'FIXED', 'COMPRESSED', 'REDUNDANT', 'COMPACT']
         # table_compression = ['ZLIB', 'LZ4', 'NONE']
-        if not os.path.exists(parent_dir + '/sysbench_lua'):
+        if not os.path.exists(lua_dir):
             print("ERROR!: Cannot access 'sysbench_lua': No such directory")
             exit(1)
         for tbl_format in table_format:
-            query = self.basedir + "/bin/mysql --user=root --socket=" + \
-                    self.socket + " -e'drop database if exists " + \
-                    db + "_" + tbl_format + "; create database " + \
-                    db + "_" + tbl_format + ";' > /dev/null 2>&1"
-            if self.debug == 'YES':
-                print(query)
-            query_status = os.system(query)
-            if int(query_status) != 0:
-                # return 1
-                print("ERROR!: Could not create sysbench test database(" + db + "_" + tbl_format + ")")
-                exit(1)
+            queries = ["drop database if exists " + db + "_" + tbl_format,
+                       "create database " + db + "_" + tbl_format]
+            self.__node.execute_queries(queries)
+
             row_format_option = 'sed -i ' \
-                                "'s#mysql_table_options = " \
-                                '.*."#mysql_table_options = "row_format=' + \
-                                tbl_format + '"#g' + "' " + parent_dir + \
-                                '/sysbench_lua/oltp_custom_common.lua'
-            if self.debug == 'YES':
+                                "'s#mysql_table_options = " \
+                                '.*."#mysql_table_options = "row_format=' + \
+                                tbl_format + '"#g' + "' " + lua_dir + \
+                                'oltp_custom_common.lua'
+            if self.__debug == 'YES':
                 print(row_format_option)
             os.system(row_format_option)
             self.sysbench_load(db + "_" + tbl_format, table_count, thread, table_size)
             row_format_option = 'sed -i ' \
                                 "'s#mysql_table_options = " \
                                 '.*."#mysql_table_options = "' + \
-                                '"#g' + "' " + parent_dir + \
-                                '/sysbench_lua/oltp_custom_common.lua'
-            if 
self.debug == 'YES':
+                                '"#g' + "' " + lua_dir + \
+                                'oltp_custom_common.lua'
+            if self.__debug == 'YES':
                 print(row_format_option)
             os.system(row_format_option)
         return 0

+    def test_sysbench_custom_table(self, db, table_count=SYSBENCH_TABLE_COUNT, thread=SYSBENCH_THREADS,
+                                   table_size=SYSBENCH_CUSTOMIZED_DATALOAD_TABLE_SIZE):
+        result = self.sysbench_custom_table(db, table_count, thread, table_size)
+        utility_cmd = utility.Utility(self.__debug)
+        utility_cmd.check_testcase(result, "Sysbench data load")
+
+    def sysbench_tpcc_run(self, db, table_count, threads, table_size, time, background: bool = False):
+        if background:
+            log_file = "sysbench_write_only.log &"
         else:
-            str_run = self.workdir + "/log/sysbench_write_only.log"
+            log_file = "sysbench_write_only.log"
+
+        params = self.get_params('oltp_write_only.lua', table_size, table_count, threads,
+                                 db, log_file)
+        params['time'] = str(time)
+
         # Sysbench OLTP write only run
-        query = "sysbench /usr/share/sysbench/oltp_write_only.lua" \
-                " --table-size=" + str(table_size) + \
-                " --tables=" + str(tables) + \
-                " --threads=" + str(threads) + \
-                " --mysql-db=" + db + " " + SYSBENCH_DB_CONNECT + \
-                " --mysql-socket=" + self.socket + \
-                " --time=" + str(time) + \
-                " --db-ps-mode=disable run > " + str_run
-        if self.debug == 'YES':
+        query = ("sysbench {lua} --table-size={table-size} --tables={tables} --threads={threads} --mysql-db={db} "
+                 "--mysql-user={user} --mysql-password={password} --db-driver=mysql "
+                 "--mysql-socket={socket} --time={time} --db-ps-mode=disable run > {log-file}").format(**params)
+
+        return self.execute_sysbench_query(query)
+
+    def execute_sysbench_query(self, query):
+        if self.__debug == 'YES':
             print(query)
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR!: sysbench write only run is failed")
+        if int(os.system(EXPORT_LUA_PATH + ";" + query)) != 0:
+            print("ERROR!: sysbench run is failed")
             return 1
         return 0

+    def encrypt_sysbench_tables(self, db: str):
+        for i in range(1, int(SYSBENCH_TABLE_COUNT) + 1):
+            query = "alter table " + db + ".sbtest" + str(i) + " encryption='Y'"
+            self.__node.execute(query)
diff --git a/util/table_checksum.py b/util/table_checksum.py
index a145dab..5ee50aa 100644
--- a/util/table_checksum.py
+++ b/util/table_checksum.py
@@ -1,130 +1,80 @@
 import os
+
 from util import utility
+from util.db_connection import DbConnection


 class TableChecksum:
-    def __init__(self, pt_basedir, basedir, workdir, node, socket, debug):
-        self.pt_basedir = pt_basedir
-        self.basedir = basedir
-        self.workdir = workdir
-        self.node = node
-        self.socket = socket
-        self.debug = debug
-        self.utility_cmd = utility.Utility(debug)
+    def __init__(self, node: DbConnection, workdir, pt_basedir, debug):
+        self.__workdir = workdir
+        self.__pt_basedir = pt_basedir
+        self.__node = node
+        self.__debug = debug
+        self.__utility_cmd = utility.Utility(debug)

-    def run_query(self, query):
-        query_status = os.system(query)
-        if int(query_status) != 0:
-            print("ERROR! Query execution failed: " + query)
-            return 1
-        return 0
-
-    def sanity_check(self):
+    def sanity_check(self, nodes: list[DbConnection]):
         """ Sanity check method will check the availability
             of pt-table-checksum binary file.
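+            It also creates the pt_user account and populates
+            the percona.dsns table with one DSN row per entry
+            in nodes, which data_consistency() later feeds to
+            pt-table-checksum via --recursion-method.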
""" - if not os.path.isfile(self.pt_basedir + '/bin/pt-table-checksum'): + if not os.path.isfile(self.__pt_basedir + '/bin/pt-table-checksum'): print('pt-table-checksum is missing in percona toolkit basedir') return 1 - version = self.utility_cmd.version_check(self.basedir) + version = self.__utility_cmd.version_check(self.__node.get_socket()) + + queries = ["create user if not exists pt_user@'localhost' identified by 'test'", + "grant all on *.* to pt_user@'localhost'"] + # Creating pt_user for database consistency check if int(version) < int("050700"): - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"create user ' \ - " pt_user@'localhost' identified by 'test';" \ - "grant all on *.* to pt_user@'localhost'" \ - ';" > /dev/null 2>&1' - else: - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"create user if not exists' \ - " pt_user@'localhost' identified with " \ - " mysql_native_password by 'test';" \ - "grant all on *.* to pt_user@'localhost'" \ - ';" > /dev/null 2>&1' - self.run_query(query) - # Creating percona db for cluster data checksum - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"drop database if exists percona;' \ - 'create database percona;' \ - 'drop table if exists percona.dsns;' \ - 'create table percona.dsns(id int,' \ - 'parent_id int,dsn varchar(100), ' \ - 'primary key(id));" > /dev/null 2>&1' - self.run_query(query) + queries[0] = "create user pt_user@'localhost' identified by 'test'" + self.__node.execute_queries(queries) - for i in range(1, int(self.node) + 1): - port = self.basedir + "/bin/mysql --user=root " + \ - '--socket=' + self.workdir + '/node' + str(i) + '/mysql.sock' + \ - ' -Bse"select @@port" 2>&1' - port = os.popen(port).read().rstrip() + queries = ["drop database if exists percona", + "create database percona", + "create table percona.dsns(id int, parent_id int, dsn varchar(100), primary key(id))"] + # Creating percona db for cluster data checksum + self.__node.execute_queries(queries) - insert_query = self.basedir + "/bin/mysql --user=root " + \ - '--socket=' + self.socket + \ - ' -e"insert into percona.dsns (id,dsn) values (' + \ - str(i) + ",'h=127.0.0.1,P=" + str(port) + \ - ",u=pt_user,p=test');" \ - '"> /dev/null 2>&1' - self.run_query(insert_query) + for node in nodes: + self.__node.execute('insert into percona.dsns (id,dsn) values (' + str(node.get_port()) + ",'h=127.0.0.1,P=" + + str(node.get_port()) + ",u=pt_user,p=test')") return 0 def error_status(self, error_code): # Checking pt-table-checksum error + error_map = {'1': ": A non-fatal error occurred", '2': ": --pid file exists and the PID is running", + '4': ": Caught SIGHUP, SIGINT, SIGPIPE, or SIGTERM", + '8': ": No replicas or cluster nodes were found", '16': ": At least one diff was found", + '32': ": At least one chunk was skipped", '64': ": At least one table was skipped", } if error_code == "0": - self.utility_cmd.check_testcase(0, "pt-table-checksum run status") - elif error_code == "1": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": A non-fatal error occurred") - elif error_code == "2": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": --pid file exists and the PID is running") - elif error_code == "4": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": Caught SIGHUP, SIGINT, SIGPIPE, or SIGTERM") - elif error_code == "8": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": No 
replicas or cluster nodes were found") - elif error_code == "16": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": At least one diff was found") - elif error_code == "32": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": At least one chunk was skipped") - elif error_code == "64": - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": At least one table was skipped") + self.__utility_cmd.check_testcase(0, "pt-table-checksum run status") else: - self.utility_cmd.check_testcase(1, "pt-table-checksum error code " - ": Fatal error occurred. Please" - "check error log for more info") + msg = error_map.get(error_code) + if msg is None: + msg = ": Fatal error occurred. Please check error log for more info" + + self.__utility_cmd.check_testcase(1, "pt-table-checksum error code " + msg) def data_consistency(self, database): """ Data consistency check method will compare the data between cluster nodes """ - port = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -Bse"select @@port" 2>&1' - port = os.popen(port).read().rstrip() - version = self.utility_cmd.version_check(self.basedir) + port = self.__node.execute_get_value("select @@port") + version = self.__utility_cmd.version_check(self.__node.get_base_dir()) # Disable pxc_strict_mode for pt-table-checksum run if int(version) > int("050700"): - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"set global pxc_strict_mode=DISABLED;' \ - '" > /dev/null 2>&1' - self.run_query(query) + self.__node.execute("set global pxc_strict_mode=DISABLED") - run_checksum = self.pt_basedir + "/bin/pt-table-checksum h=127.0.0.1,P=" + \ - str(port) + ",u=pt_user,p=test -d" + database + \ - " --recursion-method dsn=h=127.0.0.1,P=" + str(port) + \ - ",u=pt_user,p=test,D=percona,t=dsns >" + self.workdir + "/log/pt-table-checksum.log 2>&1; echo $?" + run_checksum = self.__pt_basedir + "/bin/pt-table-checksum h=127.0.0.1,P=" + \ + str(port) + ",u=pt_user,p=test -d" + database + \ + " --recursion-method dsn=h=127.0.0.1,P=" + str(port) + \ + ",u=pt_user,p=test,D=percona,t=dsns >" + self.__workdir + "/log/pt-table-checksum.log 2>&1; echo $?" 
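+        # The trailing "echo $?" surfaces pt-table-checksum's exit status; it is
+        # captured below via os.popen and decoded by error_status().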
checksum_status = os.popen(run_checksum).read().rstrip() self.error_status(checksum_status) if int(version) > int("050700"): # Enable pxc_strict_mode after pt-table-checksum run - query = self.basedir + "/bin/mysql --user=root --socket=" + \ - self.socket + ' -e"set global pxc_strict_mode=ENFORCING;' \ - '" > /dev/null 2>&1' - self.run_query(query) + self.__node.execute("set global pxc_strict_mode=ENFORCING") return 0 diff --git a/util/utility.py b/util/utility.py index 5f4ffe8..c485163 100644 --- a/util/utility.py +++ b/util/utility.py @@ -1,38 +1,75 @@ #!/usr/bin/env python3 import os -import random -import shutil -import subprocess import sys import time from datetime import datetime -from distutils.spawn import find_executable -from util import db_connection -from util import pxc_startup -from util import ps_startup +from enum import Enum + +import config +from util.db_connection import DbConnection + +pstress_bin = config.PSTRESS_BIN + +DEFAULT_SERVER_UP_TIMEOUT = 300 + + +class RplType(Enum): + GTID_LESS = 1 + GTID = 2 + BACKUP_REPLICA = 3 + + +def test_header(test_description: str): + print('------------------------------------------------------------------------------------') + print(test_description) + print('------------------------------------------------------------------------------------') + + +def test_scenario_header(test_scenario_description: str): + print('------------------------------------------------------------------------------------') + print(datetime.now().strftime("%H:%M:%S ") + ' ' + test_scenario_description) + print('------------------------------------------------------------------------------------') + + +def sysbench_pid(): + query = 'pidof sysbench' + return os.popen(query).read().rstrip() + + +def sysbech_node_pid(node_number: int): + query = ("ps -ef | grep sysbench | grep -v grep | grep node" + str(node_number) + + " | awk '{print $2}'") + return os.popen(query).read().rstrip() + + +def get_mysql_version(basedir: str): + query = basedir + "/bin/mysqld --version 2>&1 | grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1" + return os.popen(query).read().rstrip() + + +class Version(Enum): + LOWER = 1 + HIGHER = 2 -backup_dir="" class Utility: def __init__(self, debug): self.debug = debug + self.outfile = "/tmp/result.file" - def printit(self, text, status): - # print the testcase status - now = datetime.now().strftime("%H:%M:%S ") - print(now + ' ' + f'{text:100}' + '[ ' + status + ' ]') - - def check_testcase(self, result, testcase, is_terminate=None): + @staticmethod + def check_testcase(result, testcase, is_terminate: bool = True): # print testcase status based on success/failure output. now = datetime.now().strftime("%H:%M:%S ") if result == 0: print(now + ' ' + f'{testcase:100}' + '[ \u2713 ]') else: print(now + ' ' + f'{testcase:100}' + '[ \u2717 ]') - if is_terminate is None: + if is_terminate: exit(1) - def check_python_version(self): + @staticmethod + def check_python_version(): """ Check python version. Raise error if the version is 3.5 or lower """ @@ -40,7 +77,8 @@ def check_python_version(self): print("\nError! 
You should use python 3.6 or greater\n") exit(1) - def version_check(self, basedir): + @staticmethod + def version_check(basedir: str): # Get database version number version_info = os.popen(basedir + "/bin/mysqld --version 2>&1 " "| grep -oe '[0-9]\.[0-9][\.0-9]*' | head -n1").read() @@ -49,237 +87,70 @@ def version_check(self, basedir): int(version_info.split('.')[2])) return version - def create_custom_cnf(self, parent_dir, workdir): - """ Add random mysqld options - """ - # Read 10 random mysqld options from the option file - with open(parent_dir + '/conf/mysql_options_pxc57.txt') as f: - lines = random.sample(f.readlines(), 10) - cnf_name = open(workdir + '/conf/custom.cnf', 'a+') - cnf_name.write('\n') - # Add the random option in custom.cnf - for x in range(len(lines)): - cnf_name.write(lines[x]) - cnf_name.close() - return 0 - - def check_table_count(self, basedir, db, socket1, socket2): + def test_table_count(self, node1: DbConnection, node2: DbConnection, db): """ This method will compare the table count between two nodes """ - query = basedir + '/bin/mysql -uroot ' + db + ' --socket=' + \ - socket1 + ' -Bse"show tables;"' - tables = os.popen(query).read().rstrip() - tables_names = tables.split('\n') + result = 0 + tables = node1.execute_get_values("show tables in " + db) # Compare the table checksum between node1 and node2 - for index, table in enumerate(tables_names): - query = basedir + '/bin/mysql -uroot --socket=' + \ - socket1 + ' -Bse"checksum table ' + \ - db + '.' + table + ';"' - table_count_node1 = os.popen(query).read().rstrip() - if self.debug == 'YES': - print(query) - print('Table count ' + table_count_node1) - query = basedir + '/bin/mysql -uroot --socket=' + \ - socket2 + ' -Bse"checksum table ' + \ - db + '.' + table + ';"' - table_count_node2 = os.popen(query).read().rstrip() - if self.debug == 'YES': - print(query) - print('Table count ' + table_count_node2) - if table_count_node1 == table_count_node2: - # Using Mod, get index of element and check if we reached to the end - # of list, if so return otherwise continue checking other tables. - next_index = (index + 1) % len(tables_names) - if next_index == 0: - return 0 - else: continue - else: - print("\tTable(" + db + '.' + table + " ) checksum is different") - return 1 - - def pxb_sanity_check(self, basedir, workdir, socket): - """ This method will check pxb installation and - cleanup backup directory - """ - # Check xtrabackup installation - if find_executable('xtrabackup') is None: - print('\tERROR! Percona Xtrabackup is not installed.') - exit(1) - - # Recreate backup directory - if os.path.exists(workdir + '/backup'): - shutil.rmtree(workdir + '/backup') - os.mkdir(workdir + '/backup') - else: - os.mkdir(workdir + '/backup') + for table in tables: + table_count_node1 = node1.execute_get_value('checksum table ' + db + '.' + table[0], 4) + table_count_node2 = node2.execute_get_value('checksum table ' + db + '.' + table[0], 7) + if table_count_node1 != table_count_node2: + print("\tTable(" + db + '.' + table[0] + " ) checksum is different") + result = 1 + self.check_testcase(result, "Checksum run for DB: " + db) - # Check PXC version and create XB user with mysql_native_password plugin. 
- version = self.version_check(basedir) - if int(version) < int("050700"): - create_user = basedir + "/bin/mysql --user=root " \ - "--socket=" + socket + ' -e"create user xbuser' \ - "@'localhost' identified by 'test" \ - "';grant all on *.* to xbuser@'localhost'" \ - ';" > /dev/null 2>&1' - else: - create_user = basedir + "/bin/mysql --user=root " \ - "--socket=" + socket + ' -e"create user xbuser' \ - "@'localhost' identified with mysql_native_password by 'test" \ - "';grant all on *.* to xbuser@'localhost'" \ - ';" > /dev/null 2>&1' - if self.debug == 'YES': - print(create_user) - query_status = os.system(create_user) - if int(query_status) != 0: - print("ERROR!: Could not create xtrabackup user user : xbuser") - exit(1) - - def pxb_backup(self, workdir, source_datadir, socket, encryption, dest_datadir=None): - """ This method will backup PXC/PS data directory - with the help of xtrabackup. - """ - # Enable keyring file plugin if it is encryption run - if encryption == 'YES': - backup_extra = " --keyring-file-data=" + source_datadir + \ - "/keyring --early-plugin-load='keyring_file=keyring_file.so'" - else: - backup_extra = '' - - # Backup data using xtrabackup - backup_cmd = "xtrabackup --user=xbuser --password='test' --backup " \ - " --target-dir=" + workdir + "/backup -S" + \ - socket + " --datadir=" + source_datadir + " " + backup_extra + " --lock-ddl >" + \ - workdir + "/log/xb_backup.log 2>&1" - if self.debug == 'YES': - print(backup_cmd) - os.system(backup_cmd) - - # Prepare backup for node startup - prepare_backup = "xtrabackup --prepare --target_dir=" + \ - workdir + "/backup " + backup_extra + " --lock-ddl >" + \ - workdir + "/log/xb_backup_prepare.log 2>&1" - if self.debug == 'YES': - print(prepare_backup) - os.system(prepare_backup) - - # copy backup directory to destination - if dest_datadir is not None: - copy_backup = "xtrabackup --copy-back --target-dir=" + \ - workdir + "/backup --datadir=" + \ - dest_datadir + " " + backup_extra + " --lock-ddl >" + \ - workdir + "/log/copy_backup.log 2>&1" - if self.debug == 'YES': - print(copy_backup) - os.system(copy_backup) - - # Set backup dir - global backup_dir - backup_dir=workdir + "/backup" - if self.debug == 'YES': - print("Backup dir path: ", backup_dir) - - # Copy keyring file to destination directory for encryption startup - if encryption == 'YES': - os.system("cp " + source_datadir + "/keyring " + dest_datadir) - - def replication_io_status(self, basedir, socket, node, channel): + def replication_io_status(self, node: DbConnection, version: str, channel: str = ''): """ This will check replication IO thread running status """ if channel == 'none': - channel = "" # channel name is to identify the replication source + channel = "" + # Get slave status - version = self.version_check(basedir) if int(version) < int("050700"): - io_status = basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"SHOW SLAVE STATUS\G" 2>&1 ' \ - '| grep "Slave_IO_Running:" ' \ - "| awk '{ print $2 }'" - if self.debug == 'YES': - print(io_status) - io_status = os.popen(io_status).read().rstrip() - if io_status == "Yes": - check_slave_status = 'ON' - else: - check_slave_status = 'OFF' + replica_status = node.get_column_value("SHOW SLAVE STATUS", "Slave_IO_Running") else: - check_slave_status = basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"SELECT SERVICE_STATE ' \ - 'FROM performance_schema.replication_connection_status' \ - " where channel_name='" + channel + "'" + '" 2>&1' - if self.debug == 'YES': - 
print(check_slave_status) - check_slave_status = os.popen(check_slave_status).read().rstrip() - if check_slave_status != 'ON': - self.check_testcase(1, node + ": IO thread slave status") - print("\tERROR!: Slave IO thread is not running, check slave status") - exit(1) + replica_status = node.execute_get_value("SELECT SERVICE_STATE FROM " + "performance_schema.replication_connection_status where " + "channel_name='" + channel + "'") + if replica_status not in ['ON', 'Yes']: + self.check_testcase(1, "Replica IO thread is not running, check replica status") else: - self.check_testcase(0, node + ": IO thread slave status") + self.check_testcase(0, "Replica IO thread is running fine") - def replication_sql_status(self, basedir, socket, node, channel): + def replication_sql_status(self, node: DbConnection, version: str, channel: str = ''): """ This will check replication SQL thread running status """ if channel == 'none': channel = "" # channel name is to identify the replication source + # Get slave status - version = self.version_check(basedir) if int(version) < int("050700"): - sql_status = basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"SHOW SLAVE STATUS\G" 2>&1 ' \ - '| grep "Slave_SQL_Running:" ' \ - "| awk '{ print $2 }'" - if self.debug == 'YES': - print(sql_status) - sql_status = os.popen(sql_status).read().rstrip() - if sql_status == "Yes": - check_slave_status = 'ON' - else: - check_slave_status = 'OFF' + replica_status = node.get_column_value("SHOW SLAVE STATUS", "Slave_SQL_Running") else: - check_slave_status = basedir + "/bin/mysql --user=root --socket=" + \ - socket + ' -Bse"SELECT SERVICE_STATE ' \ - 'FROM performance_schema.replication_applier_status' \ - " where channel_name='" + channel + "'" + '" 2>&1' - if self.debug == 'YES': - print(check_slave_status) - check_slave_status = os.popen(check_slave_status).read().rstrip() - if check_slave_status != 'ON': - self.check_testcase(1, node + ": SQL thread slave status") - print("\tERROR!: Slave SQL thread is not running, check slave status") - exit(1) + replica_status = node.execute_get_value("SELECT SERVICE_STATE FROM " + "performance_schema.replication_applier_status where " + "channel_name='" + channel + "'") + if replica_status not in ['Yes', 'ON']: + self.check_testcase(1, "Replica SQL thread is not running, check replica status") else: - self.check_testcase(0, node + ": SQL thread slave status") - - def invoke_replication(self, basedir, master_socket, slave_socket, repl_mode, comment): - """ This method will invoke replication. 
- :param basedir: PXC/PS base directory - :param master_socket: Master Server socket - :param slave_socket: Slave server socket - :param repl_mode: Three mode will support now - GTID : GTID replication - NON-GTID : Non GTID replication - backup_slave : This will start replication - from XB backup and it uses - non-gtid replication - :param comment: Replication channel details - """ - if comment == 'none': - comment = "" # channel name is to identify the replication source + self.check_testcase(0, "Replica SQL thread is running fine") + + def invoke_replication(self, source_node: DbConnection, replica_node: DbConnection, + repl_mode: RplType, channel_name: str = None, + backup_dir=''): + if channel_name is None: + channel_name = "" # channel name is to identify the replication source # Setup async replication - flush_log = basedir + "/bin/mysql --user=root --socket=" + \ - master_socket + ' -Bse "flush logs" 2>&1' - if self.debug == 'YES': - print(flush_log) - os.system(flush_log) - if repl_mode == 'backup_slave': - data_dir = basedir + "/bin/mysql --user=root --socket=" + \ - slave_socket + " -Bse 'select @@datadir';" + source_node.execute("flush logs") + if repl_mode == RplType.BACKUP_REPLICA: + data_dir = replica_node.execute_get_value('select @@datadir') if self.debug == 'YES': print(data_dir) - data_dir = os.popen(data_dir).read().rstrip() query = "cat " + backup_dir + "/xtrabackup_binlog_info | awk '{print $1}'" master_log_file = os.popen(query).read().rstrip() query = "cat " + backup_dir + "/xtrabackup_binlog_info | awk '{print $2}'" @@ -289,174 +160,112 @@ def invoke_replication(self, basedir, master_socket, slave_socket, repl_mode, co print("master_log_file: ", master_log_file) print("master_log_pos: ", master_log_pos) else: - master_log_file = basedir + "/bin/mysql --user=root --socket=" + \ - master_socket + \ - " -Bse 'show master logs' | awk '{print $1}' | tail -1 2>&1" + master_log_file = source_node.get_column_value("show master logs", "Log_name") if self.debug == 'YES': print(master_log_file) - master_log_file = os.popen(master_log_file).read().rstrip() master_log_pos = 4 - master_port = basedir + "/bin/mysql --user=root --socket=" + \ - master_socket + \ - ' -Bse "select @@port" 2>&1' - master_port = os.popen(master_port).read().rstrip() - if repl_mode == 'GTID': - invoke_slave = basedir + "/bin/mysql --user=root --socket=" + \ - slave_socket + ' -Bse"CHANGE MASTER TO MASTER_HOST=' + \ - "'127.0.0.1', MASTER_PORT=" + master_port + ", MASTER_USER='root'" + \ - ", MASTER_AUTO_POSITION=1 " + comment + ' ; START SLAVE;" 2>&1' + master_port = source_node.execute_get_value("select @@port") + + if repl_mode == RplType.GTID: + replica_node.execute("CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=" + str(master_port) + + ", MASTER_USER='root', MASTER_AUTO_POSITION=1 for channel '" + channel_name + "'") else: - invoke_slave = basedir + "/bin/mysql --user=root --socket=" + \ - slave_socket + ' -Bse"CHANGE MASTER TO MASTER_HOST=' + \ - "'127.0.0.1', MASTER_PORT=" + master_port + ", MASTER_USER='root'" + \ - ", MASTER_LOG_FILE='" + master_log_file + "'" + \ - ', MASTER_LOG_POS=' + str(master_log_pos) + ' ' \ - + comment + ';START SLAVE;" 2>&1' + replica_node.execute( + "CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=" + str( + master_port) + ", MASTER_USER='root', MASTER_LOG_FILE='" + master_log_file + "', MASTER_LOG_POS=" + + str(master_log_pos) + " for channel '" + channel_name + "'") + replica_node.execute("START SLAVE") + self.check_testcase(0, "Initiated replication") 
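# Editor's note: a minimal usage sketch for the refactored replication API
# (hypothetical node names; assumes two already-started DbConnection objects,
# e.g. from BaseTest.start_pxc() and start_ps()):
#
#   utility_cmd = Utility(debug)
#   utility_cmd.invoke_replication(source_node, replica_node, RplType.GTID,
#                                  channel_name='channel1')
#   utility_cmd.replication_io_status(replica_node, version, 'channel1')
#   utility_cmd.replication_sql_status(replica_node, version, 'channel1')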
+ + def pxc_startup_check(self, node: DbConnection): + # This method will check the pxc node startup status. + self.startup_check(node) + self.wait_for_wsrep_status(node) + + def kill_process(self, process_id: str, process_name: str, ignore_error=False): + kill_cmd = "kill -9 " + process_id + if ignore_error: + kill_cmd = kill_cmd + " > /dev/null 2>&1" if self.debug == 'YES': - print(invoke_slave) - result = os.system(invoke_slave) - self.check_testcase(result, "Initiated replication") - - def start_pxc(self, parent_dir, workdir, basedir, node, socket, user, encryption, my_extra): - # Start PXC cluster - dbconnection_check = db_connection.DbConnection(user, socket) - server_startup = pxc_startup.StartCluster(parent_dir, workdir, basedir, int(node), self.debug) - result = server_startup.sanity_check() - self.check_testcase(result, "Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - self.check_testcase(result, "Configuration file creation") - else: - result = server_startup.create_config('none') - self.check_testcase(result, "Configuration file creation") - result = server_startup.initialize_cluster() - self.check_testcase(result, "Initializing cluster") - result = server_startup.start_cluster('--max-connections=1500 ' + my_extra) - self.check_testcase(result, "Cluster startup") - result = dbconnection_check.connection_check() - self.check_testcase(result, "Database connection") - - def start_ps(self, parent_dir, workdir, basedir, node, socket, user, encryption, my_extra): - """ Start Percona Server. This method will - perform sanity checks for PS startup - """ - # Start PXC cluster for replication test - dbconnection_check = db_connection.DbConnection(user, socket) - server_startup = ps_startup.StartPerconaServer(parent_dir, workdir, basedir, int(node)) - result = server_startup.sanity_check() - self.check_testcase(result, "PS: Startup sanity check") - if encryption == 'YES': - result = server_startup.create_config('encryption') - self.check_testcase(result, "PS: Configuration file creation") - else: - result = server_startup.create_config() - self.check_testcase(result, "PS: Configuration file creation") - result = server_startup.initialize_cluster() - self.check_testcase(result, "PS: Initializing cluster") - result = server_startup.start_server('--max-connections=1500 ' + my_extra) - self.check_testcase(result, "PS: Cluster startup") - result = dbconnection_check.connection_check() - self.check_testcase(result, "PS: Database connection") - - def stop_pxc(self, workdir, basedir, node): - # Stop PXC cluster - for i in range(int(node), 0, -1): - shutdown_node = basedir + '/bin/mysqladmin --user=root --socket=' + \ - workdir + '/node' + str(i) + '/mysql.sock shutdown > /dev/null 2>&1' - if self.debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - self.check_testcase(result, "PXC: shutting down cluster node" + str(i)) - - def stop_ps(self, workdir, basedir, node): - # Stop Percona Server - for i in range(int(node), 0, -1): - shutdown_node = basedir + '/bin/mysqladmin --user=root --socket=/tmp/psnode' + \ - str(i) + '.sock shutdown > /dev/null 2>&1' - if self.debug == 'YES': - print(shutdown_node) - result = os.system(shutdown_node) - self.check_testcase(result, "PS: shutting down cluster node" + str(i)) + print("Terminating " + process_name + " run : " + kill_cmd) + result = os.system(kill_cmd) + if ignore_error: + result = 0 + self.check_testcase(result, "Killed " + process_name + " run") + time.sleep(10) + + def 
restart_cluster(self, nodes: list[DbConnection]): + os.system("sed -i 's#safe_to_bootstrap: 0#safe_to_bootstrap: 1#' " + + config.WORKDIR + '/node1/grastate.dat') + for node in nodes: + self.restart_and_check_node(node) + + def restart_and_check_node(self, node: DbConnection): + self.restart_cluster_node(node) + self.startup_check(node) + + def restart_cluster_node(self, node: DbConnection): + + restart_server = "bash " + node.get_startup_script() + if self.debug == 'YES': + print(restart_server) + result = os.system(restart_server) + self.check_testcase(result, "Starting/Restarting Cluster Node" + str(node.get_node_number())) - def pxc_startup_check(self, basedir, workdir, cluster_node): + def startup_check(self, node: DbConnection, terminate_on_startup_failure: bool = True): """ This method will check the node startup status. """ - query_cluster_status = basedir + '/bin/mysql --user=root --socket=' + \ - workdir + '/node' + str(cluster_node) + \ - '/mysql.sock -Bse"show status like \'wsrep_local_state_comment\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - ping_query = basedir + '/bin/mysqladmin --user=root --socket=' + \ - workdir + '/node' + str(cluster_node) + \ - '/mysql.sock ping > /dev/null 2>&1' - for startup_timer in range(300): + dbconnection_status = -1 + for startup_timer in range(DEFAULT_SERVER_UP_TIMEOUT): time.sleep(1) - cluster_status = os.popen(query_cluster_status).read().rstrip() - if cluster_status == 'Synced': - self.check_testcase(0, "Node startup is successful") + dbconnection_status = int(node.connection_check(False)) + if dbconnection_status == 0: break - if startup_timer > 298: - self.check_testcase(0, "Warning! Node is not synced with cluster. " - "Check the error log to get more info") - ping_check = subprocess.call(ping_query, shell=True, stderr=subprocess.DEVNULL) - ping_status = ("{}".format(ping_check)) - if int(ping_status) == 0: - self.check_testcase(int(ping_status), "Node startup is successful " - "(Node status:" + cluster_status + ")") - break # break the loop if mysqld is running - - def node_joiner(self, workdir, basedir, donor_node, joiner_node): - # Add new node to existing cluster - donor = 'node' + donor_node # Donor node - joiner = 'node' + joiner_node # Joiner node - shutil.copy(workdir + '/conf/' + donor + '.cnf', - workdir + '/conf/' + joiner + '.cnf') - query = basedir + '/bin/mysql --user=root --socket=' + workdir + '/node' + donor_node + \ - '/mysql.sock -Bse"show variables like \'wsrep_cluster_address\';"' \ - ' 2>/dev/null | awk \'{print $2}\'' - wsrep_cluster_addr = os.popen(query).read().rstrip() # Get cluster address - query = basedir + "/bin/mysql --user=root --socket=" + \ - workdir + '/node' + donor_node + '/mysql.sock -Bse"select @@port" 2>&1' - if self.debug == 'YES': - print(query) - port_no = os.popen(query).read().rstrip() # Port number from Donor - wsrep_port_no = int(port_no) + 108 # New wsrep port number - port_no = int(port_no) + 100 # New Joiner port number - - # Create new cnf for joiner - os.system("sed -i 's#" + donor + "#" + joiner + "#g' " + workdir + - '/conf/' + joiner + '.cnf') - os.system("sed -i '/wsrep_sst_auth=root:/d' " + workdir + - '/conf/' + joiner + '.cnf') - os.system("sed -i '0,/^[ \\t]*wsrep_cluster_address[ \\t]*=.*$/s|" - "^[ \\t]*wsrep_cluster_address[ \\t]*=.*$|wsrep_cluster_address=" - + wsrep_cluster_addr + "127.0.0.1:" + str(wsrep_port_no) + "|' " - + workdir + '/conf/' + joiner + '.cnf') - os.system("sed -i '0,/^[ \\t]*port[ \\t]*=.*$/s|" - "^[ \\t]*port[ \\t]*=.*$|port=" - + str(port_no) + "|' 
" + workdir + '/conf/' + joiner + '.cnf') - os.system('sed -i "0,/^[ \\t]*wsrep_provider_options[ \\t]*=.*$/s|' - "^[ \\t]*wsrep_provider_options[ \\t]*=.*$|wsrep_provider_options=" - "'gmcast.listen_addr=tcp://127.0.0.1:" + - str(wsrep_port_no) + "'" - '|" ' + workdir + '/conf/' + joiner + '.cnf') - os.system("sed -i '0,/^[ \\t]*server_id[ \\t]*=.*$/s|" - "^[ \\t]*server_id[ \\t]*=.*$|server_id=" - "14|' " + workdir + '/conf/' + joiner + '.cnf') - - # Create startup script for joiner. - shutil.copy(workdir + '/log/startup' + donor_node + '.sh', - workdir + '/log/startup' + joiner_node + '.sh') - os.system("sed -i 's#" + donor + "#" + joiner + "#g' " + workdir + - '/log/startup' + joiner_node + '.sh') - os.system("rm -rf " + workdir + '/' + joiner) - os.mkdir(workdir + '/' + joiner) - joiner_startup = "bash " + workdir + \ - '/log/startup' + joiner_node + '.sh' + self.check_testcase(dbconnection_status, "Verify node startup", terminate_on_startup_failure) + return dbconnection_status + + def wait_for_wsrep_status(self, node: DbConnection, node_sync_timeout=DEFAULT_SERVER_UP_TIMEOUT): + node_synced = -1 + for startup_timer in range(node_sync_timeout): + time.sleep(1) + wsrep_status = (node.execute_get_row("show status like 'wsrep_local_state_comment'")[1] + .strip()) + if wsrep_status == "Synced": + node_synced = 0 + break + self.check_testcase(node_synced, "Cluster Node recovery is successful") + + def kill_cluster_node(self, node: DbConnection): + pid_file = node.execute_get_value("select @@pid_file") + query = 'cat ' + pid_file + pid = os.popen(query).read().rstrip() + self.kill_process(pid, "cluster node") + + def kill_cluster_nodes(self): if self.debug == 'YES': - print(joiner_startup) - # Invoke joiner - result = os.system(joiner_startup) - self.check_testcase(result, "Starting cluster " + joiner) - self.pxc_startup_check(basedir, workdir, joiner_node) + print("Killing existing mysql process using 'kill -9' command") + os.system("ps -ef | grep '" + config.WORKDIR + "/conf/node[0-9].cnf' | grep -v grep | " + "awk '{print $2}' | xargs kill -9 >/dev/null 2>&1") + + def pstress_run(self, workdir: str, socket: str, db: str, seed: int, step_num: int = None, + tables: int = 25, threads: int = 50, records: int = None, pstress_extra: str = None): + if not os.path.isfile(pstress_bin): + print(pstress_bin + ' does not exist') + exit(1) + extra_setting = " --records 1000" + if records is not None: + extra_setting = " --records " + str(records) + if step_num is not None: + extra_setting = extra_setting + " --step " + str(step_num) + pstress_cmd = pstress_bin + " --database=" + db + " --threads=" + str(threads) + " --logdir=" + \ + workdir + "/log --log-all-queries --log-failed-queries --user=root --socket=" + \ + socket + " --seed " + str(seed) + " --tables " + str(tables) + " " + \ + pstress_extra + " --seconds 300 --grammar-file " + \ + config.PSTRESS_GRAMMAR_FILE + extra_setting + " > " + \ + workdir + "/log/pstress_run.log" + self.check_testcase(0, "PSTRESS RUN command : " + pstress_cmd) + query_status = os.system(pstress_cmd) + if int(query_status) != 0: + self.check_testcase(1, "ERROR!: PSTRESS run failed")