diff --git a/README.md b/README.md index f63b0832a2..d6f9a5e25a 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ A convenient alternative is to let the SONiC build system configure a build envi 2. Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished ``` - make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl + make -f Makefile.work BLDENV=bookworm KEEP_SLAVE_ON=yes target/python-wheels/bookworm/sonic_utilities-1.2-py3-none-any.whl ``` 3. When the build finishes, your prompt will change to indicate you are inside the slave container. Change into the `src/sonic-utilities/` directory @@ -66,6 +66,7 @@ A convenient alternative is to let the SONiC build system configure a build envi ``` python3 setup.py bdist_wheel ``` +Note: By default, this command does not update the wheel package in target/. Use the "-d" option to specify the destination directory for the wheel package. #### To run unit tests @@ -73,6 +74,12 @@ python3 setup.py bdist_wheel python3 setup.py test ``` +#### To install the package on a SONiC machine +``` +sudo pip uninstall sonic-utilities +sudo pip install YOUR_WHEEL_PACKAGE +``` +Note: Don't use "--force-reinstall". ### sonic-utilities-data diff --git a/azure-pipelines.yml b/azure-pipelines.yml index dec731eea4..8cb6586a9b 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: displayName: "Static Analysis" timeoutInMinutes: 10 continueOnError: true - pool: ubuntu-20.04 + pool: sonic-ubuntu-1c steps: - template: .azure-pipelines/pre-commit-check.yml @@ -46,6 +46,13 @@ stages: image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:$(BUILD_BRANCH) steps: + - script: | + set -ex + sudo apt-get update + sudo apt-get install -y python3-pip + sudo pip3 install requests==2.31.0 + displayName: "Install dependencies" + - script: | sourceBranch=$(Build.SourceBranchName) if [[ "$(Build.Reason)" == "PullRequest" ]];then diff --git a/config/bgp_cli.py b/config/bgp_cli.py new file mode 100644 index 0000000000..a5a565359a --- /dev/null +++ b/config/bgp_cli.py @@ -0,0 +1,192 @@ +import click +import utilities_common.cli as clicommon + +from sonic_py_common import logger +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + SYSLOG_IDENTIFIER, + to_str, +) + + +log = logger.Logger(SYSLOG_IDENTIFIER) +log.set_min_log_priority_info() + + +# +# BGP DB interface ---------------------------------------------------------------------------------------------------- +# + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector object. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + create_if_not_exists (bool): + If the entry does not already exist, a new entry is created + when this flag is set to True; when it is False, the update fails. + Raises: + Exception: when cfg does not satisfy YANG schema.
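+ Example (illustrative; mirrors how tsa_handler below invokes this helper): + update_entry_validated(db.cfgdb, CFG_BGP_DEVICE_GLOBAL, BGP_DEVICE_GLOBAL_KEY, + {"tsa_enabled": "true"}, create_if_not_exists=True)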
+ """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if not data: + raise click.ClickException(f"No field/values to update {key}") + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise click.ClickException(f"{key} does not exist") + + entry_changed = False + for attr, value in data.items(): + if value == cfg[table][key].get(attr): + continue + entry_changed = True + if value is None: + cfg[table][key].pop(attr, None) + else: + cfg[table][key][attr] = value + + if not entry_changed: + return + + db.set_entry(table, key, cfg[table][key]) + + +# +# BGP handlers -------------------------------------------------------------------------------------------------------- +# + + +def tsa_handler(ctx, db, state): + """ Handle config updates for Traffic-Shift-Away (TSA) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "tsa_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured TSA state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure TSA state: {}".format(str(e))) + ctx.fail(str(e)) + + +def wcmp_handler(ctx, db, state): + """ Handle config updates for Weighted-Cost Multi-Path (W-ECMP) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "wcmp_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured W-ECMP state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure W-ECMP state: {}".format(str(e))) + ctx.fail(str(e)) + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="device-global", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL(): + """ Configure BGP device global state """ + + pass + + +# +# BGP device-global tsa ----------------------------------------------------------------------------------------------- +# + + +@DEVICE_GLOBAL.group( + name="tsa", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL_TSA(): + """ Configure Traffic-Shift-Away (TSA) feature """ + + pass + + +@DEVICE_GLOBAL_TSA.command( + name="enabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_ENABLED(ctx, db): + """ Enable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "true") + + +@DEVICE_GLOBAL_TSA.command( + name="disabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_DISABLED(ctx, db): + """ Disable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "false") + + +# +# BGP device-global w-ecmp -------------------------------------------------------------------------------------------- +# + + +@DEVICE_GLOBAL.group( + name="w-ecmp", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL_WCMP(): + """ Configure Weighted-Cost Multi-Path (W-ECMP) feature """ + + pass + + +@DEVICE_GLOBAL_WCMP.command( + name="enabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_WCMP_ENABLED(ctx, db): + """ Enable Weighted-Cost Multi-Path (W-ECMP) feature """ + + wcmp_handler(ctx, db, "true") + + +@DEVICE_GLOBAL_WCMP.command( + name="disabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_WCMP_DISABLED(ctx, db): + """ Disable Weighted-Cost Multi-Path (W-ECMP) feature """ + + wcmp_handler(ctx, db, "false") diff --git a/config/chassis_modules.py b/config/chassis_modules.py 
old mode 100644 new mode 100755 index e640779d16..4e7fd8096b --- a/config/chassis_modules.py +++ b/config/chassis_modules.py @@ -1,9 +1,14 @@ #!/usr/sbin/env python import click - +import time +import re +import subprocess import utilities_common.cli as clicommon +TIMEOUT_SECS = 10 + + # # 'chassis_modules' group ('config chassis_modules ...') # @@ -17,6 +22,81 @@ def modules(): """Configure chassis modules""" pass + +def get_config_module_state(db, chassis_module_name): + config_db = db.cfgdb + fvs = config_db.get_entry('CHASSIS_MODULE', chassis_module_name) + if not fvs: + return 'up' + else: + return fvs['admin_status'] + + +# +# Name: check_config_module_state_with_timeout +# return: True: timeout, False: not timeout +# +def check_config_module_state_with_timeout(ctx, db, chassis_module_name, state): + counter = 0 + while get_config_module_state(db, chassis_module_name) != state: + time.sleep(1) + counter += 1 + if counter >= TIMEOUT_SECS: + ctx.fail("get_config_module_state {} timeout".format(chassis_module_name)) + return True + return False + + +def get_asic_list_from_db(chassisdb, chassis_module_name): + asic_list = [] + asics_keys_list = chassisdb.keys("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE*") + for asic_key in asics_keys_list: + name = chassisdb.get("CHASSIS_STATE_DB", asic_key, "name") + if name == chassis_module_name: + asic_id = int(re.search(r"(\d+)$", asic_key).group()) + asic_list.append(asic_id) + return asic_list + + +# +# Syntax: fabric_module_set_admin_status <'up'/'down'> +# +def fabric_module_set_admin_status(db, chassis_module_name, state): + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + asic_list = get_asic_list_from_db(chassisdb, chassis_module_name) + + if len(asic_list) == 0: + return + + if state == "down": + for asic in asic_list: + click.echo("Stop swss@{} and peer services".format(asic)) + clicommon.run_command('sudo systemctl stop swss@{}.service'.format(asic)) + + is_active = subprocess.call(["systemctl", "is-active", "--quiet", "swss@{}.service".format(asic)]) + + if is_active == 0: # zero: still active, non-zero: inactive + click.echo("Stop swss@{} and peer services failed".format(asic)) + return + + click.echo("Delete related CHASSIS_FABRIC_ASIC_TABLE entries") + + for asic in asic_list: + chassisdb.delete("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic" + str(asic)) + + # Start the services in case the user just ran "systemctl stop swss@/syncd@" + # without bringing down the hardware + for asic in asic_list: + # Reset the failure count to avoid hitting the systemd service restart limit + clicommon.run_command('sudo systemctl reset-failed swss@{}.service'.format(asic)) + click.echo("Start swss@{} and peer services".format(asic)) + clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + elif state == "up": + for asic in asic_list: + click.echo("Start swss@{} and peer services".format(asic)) + clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + # # 'shutdown' subcommand ('config chassis_modules shutdown ...') # @@ -33,8 +113,17 @@ def shutdown_chassis_module(db, chassis_module_name): not chassis_module_name.startswith("FABRIC-CARD"): ctx.fail("'module_name' has to begin with 'SUPERVISOR', 'LINE-CARD' or 'FABRIC-CARD'") + # To avoid duplicate operation + if get_config_module_state(db, chassis_module_name) == 'down': + click.echo("Module {} is already in down state".format(chassis_module_name)) + return + + click.echo("Shutting down chassis module {}".format(chassis_module_name)) fvs = 
{'admin_status': 'down'} config_db.set_entry('CHASSIS_MODULE', chassis_module_name, fvs) + if chassis_module_name.startswith("FABRIC-CARD"): + if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'down'): + fabric_module_set_admin_status(db, chassis_module_name, 'down') # # 'startup' subcommand ('config chassis_modules startup ...') @@ -45,5 +134,15 @@ def shutdown_chassis_module(db, chassis_module_name): def startup_chassis_module(db, chassis_module_name): """Chassis-module startup of module""" config_db = db.cfgdb + ctx = click.get_current_context() + + # To avoid duplicate operation + if get_config_module_state(db, chassis_module_name) == 'up': + click.echo("Module {} is already set to up state".format(chassis_module_name)) + return + click.echo("Starting up chassis module {}".format(chassis_module_name)) config_db.set_entry('CHASSIS_MODULE', chassis_module_name, None) + if chassis_module_name.startswith("FABRIC-CARD"): + if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'up'): + fabric_module_set_admin_status(db, chassis_module_name, 'up') diff --git a/config/main.py b/config/main.py index 41a9f48121..7509628a67 100644 --- a/config/main.py +++ b/config/main.py @@ -1,6 +1,8 @@ #!/usr/sbin/env python +import threading import click +import concurrent.futures import datetime import ipaddress import json @@ -20,6 +22,7 @@ from jsonpointer import JsonPointerException from collections import OrderedDict from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat, extract_scope +from generic_config_updater.gu_common import HOST_NAMESPACE, GenericConfigUpdaterError from minigraph import parse_device_desc_xml, minigraph_encoder from natsort import natsorted from portconfig import get_child_ports @@ -27,9 +30,10 @@ from sonic_py_common import device_info, multi_asic from sonic_py_common.general import getstatusoutput_noshell from sonic_py_common.interface import get_interface_table_name, get_port_table_name, get_intf_longname +from sonic_yang_cfg_generator import SonicYangCfgDbGenerator from utilities_common import util_base from swsscommon import swsscommon -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector from utilities_common.db import Db from utilities_common.intf_filter import parse_interface_in_filter from utilities_common import bgp_util @@ -38,6 +42,7 @@ from utilities_common.general import load_db_config, load_module_from_source from .validated_config_db_connector import ValidatedConfigDBConnector import utilities_common.multi_asic as multi_asic_util +from utilities_common.flock import try_lock from .utils import log @@ -59,6 +64,7 @@ from . import syslog from . import switchport from . import dns +from . import bgp_cli # mock masic APIs for unit test @@ -119,6 +125,12 @@ GRE_TYPE_RANGE = click.IntRange(min=0, max=65535) ADHOC_VALIDATION = True +if os.environ.get("UTILITIES_UNIT_TESTING", "0") in ("1", "2"): + temp_system_reload_lockfile = tempfile.NamedTemporaryFile() + SYSTEM_RELOAD_LOCK = temp_system_reload_lockfile.name +else: + SYSTEM_RELOAD_LOCK = "/etc/sonic/reload.lock" + # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
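+ # (load_module_from_source, imported from utilities_common.general above, loads the module from an explicit file path)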
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -246,7 +258,7 @@ def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \ click.echo("*** Printing dependencies ***") for dep in deps: click.echo(dep) - sys.exit(0) + sys.exit(1) else: click.echo("[ERROR] Port breakout Failed!!! Opting Out") raise click.Abort() @@ -895,10 +907,47 @@ def _reset_failed_services(): for service in _get_sonic_services(): clicommon.run_command(['systemctl', 'reset-failed', str(service)]) + +def get_service_finish_timestamp(service): + out, _ = clicommon.run_command(['sudo', + 'systemctl', + 'show', + '--no-pager', + service, + '-p', + 'ExecMainExitTimestamp', + '--value'], + return_cmd=True) + return out.strip(' \t\n\r') + + +def wait_service_restart_finish(service, last_timestamp, timeout=30): + start_time = time.time() + elapsed_time = 0 + while elapsed_time < timeout: + current_timestamp = get_service_finish_timestamp(service) + if current_timestamp and (current_timestamp != last_timestamp): + return + + time.sleep(1) + elapsed_time = time.time() - start_time + + log.log_warning("Service {} did not restart within {} seconds, stop waiting".format(service, timeout)) + + def _restart_services(): + last_interface_config_timestamp = get_service_finish_timestamp('interfaces-config') + last_networking_timestamp = get_service_finish_timestamp('networking') + click.echo("Restarting SONiC target ...") clicommon.run_command(['sudo', 'systemctl', 'restart', 'sonic.target']) + # These services restart eth0, which makes the device lose network connectivity for about 10 seconds + # When TACACS is enabled, every remote user command is authorized by the TACACS service over the network + # If load_minigraph exits before eth0 restarts, commands issued after load_minigraph may fail + wait_service_restart_finish('interfaces-config', last_interface_config_timestamp) + wait_service_restart_finish('networking', last_networking_timestamp) + try: subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) click.echo("Enabling container monitoring ...") @@ -1155,25 +1204,173 @@ def validate_gre_type(ctx, _, value): return gre_type_value except ValueError: raise click.UsageError("{} is not a valid GRE type".format(value)) - + + +def multiasic_save_to_singlefile(db, filename): + """Save all ASICs' config to a single file + """ + all_current_config = {} + cfgdb_clients = db.cfgdb_clients + + for ns, config_db in cfgdb_clients.items(): + current_config = config_db.get_config() + sonic_cfggen.FormatConverter.to_serialized(current_config) + asic_name = "localhost" if ns == DEFAULT_NAMESPACE else ns + all_current_config[asic_name] = sort_dict(current_config) + click.echo("Integrate each ASIC's config into a single JSON file {}.".format(filename)) + with open(filename, 'w') as file: + json.dump(all_current_config, file, indent=4) + + +def apply_patch_wrapper(args): + return apply_patch_for_scope(*args) + + # Function to apply patch for a single ASIC.
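+ # scope_changes is one (scope, changes) item from changes_by_scope.items(); results is a dict shared across + # scopes that each call fills with {"success": ..., "message": ...} under its own scope key.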
def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path): scope, changes = scope_changes # Replace localhost to DEFAULT_NAMESPACE which is db definition of Host - if scope.lower() == "localhost" or scope == "": + if scope.lower() == HOST_NAMESPACE or scope == "": scope = multi_asic.DEFAULT_NAMESPACE - - scope_for_log = scope if scope else "localhost" + + scope_for_log = scope if scope else HOST_NAMESPACE + thread_id = threading.get_ident() + log.log_notice(f"apply_patch_for_scope started for {scope_for_log} by {changes} in thread:{thread_id}") + try: # Call apply_patch with the ASIC-specific changes and predefined parameters - GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes), + config_format, + verbose, + dry_run, + ignore_non_yang_tables, + ignore_path) results[scope_for_log] = {"success": True, "message": "Success"} - log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}") + log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes} in thread:{thread_id}") except Exception as e: results[scope_for_log] = {"success": False, "message": str(e)} log.log_error(f"'apply-patch' execution failed for {scope_for_log} by {changes} due to {str(e)}") +def validate_patch(patch): + try: + command = ["show", "runningconfiguration", "all"] + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) + # communicate() returns (stdout, stderr); take the return code from the process object + all_running_config, _ = proc.communicate() + returncode = proc.returncode + if returncode: + log.log_notice(f"Fetching all runningconfiguration failed with output:{all_running_config}") + return False + + # Structure validation and simulate apply patch. + all_target_config = patch.apply(json.loads(all_running_config)) + + # Verify target config by YANG models + target_config = all_target_config.pop(HOST_NAMESPACE) if multi_asic.is_multi_asic() else all_target_config + target_config.pop("bgpraw", None) + if not SonicYangCfgDbGenerator().validate_config_db_json(target_config): + return False + + if multi_asic.is_multi_asic(): + for asic in multi_asic.get_namespace_list(): + target_config = all_target_config.pop(asic) + target_config.pop("bgpraw", None) + if not SonicYangCfgDbGenerator().validate_config_db_json(target_config): + return False + + return True + except Exception as e: + raise GenericConfigUpdaterError(f"Validating json patch: {patch} failed due to: {e}") + + +def multiasic_validate_single_file(filename): + ns_list = [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()] + file_input = read_json_file(filename) + file_ns_list = [DEFAULT_NAMESPACE if key == HOST_NAMESPACE else key for key in file_input] + if set(ns_list) != set(file_ns_list): + click.echo( + "Input file {} must contain all ASICs' config. 
ns_list: {} file ns_list: {}".format( + filename, ns_list, file_ns_list) + ) + raise click.Abort() + + +def load_sysinfo_if_missing(asic_config): + device_metadata = asic_config.get('DEVICE_METADATA', {}) + platform = device_metadata.get("localhost", {}).get("platform") + mac = device_metadata.get("localhost", {}).get("mac") + if not platform: + log.log_warning("platform is missing from Input file") + return True + elif not mac: + log.log_warning("mac is missing from Input file") + return True + else: + return False + + +def flush_configdb(namespace=DEFAULT_NAMESPACE): + if namespace is DEFAULT_NAMESPACE: + config_db = ConfigDBConnector() + else: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + + config_db.connect() + client = config_db.get_redis_client(config_db.CONFIG_DB) + client.flushdb() + return client, config_db + + +def migrate_db_to_lastest(namespace=DEFAULT_NAMESPACE): + # Migrate DB contents to latest version + db_migrator = '/usr/local/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + if namespace is DEFAULT_NAMESPACE: + command = [db_migrator, '-o', 'migrate'] + else: + command = [db_migrator, '-o', 'migrate', '-n', namespace] + clicommon.run_command(command, display_cmd=True) + + +def multiasic_write_to_db(filename, load_sysinfo): + file_input = read_json_file(filename) + for ns in [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]: + asic_name = HOST_NAMESPACE if ns == DEFAULT_NAMESPACE else ns + asic_config = file_input[asic_name] + + asic_load_sysinfo = True if load_sysinfo else False + if not asic_load_sysinfo: + asic_load_sysinfo = load_sysinfo_if_missing(asic_config) + + if asic_load_sysinfo: + cfg_hwsku = asic_config.get("DEVICE_METADATA", {}).\ + get("localhost", {}).get("hwsku") + if not cfg_hwsku: + click.secho("Could not get the HWSKU from config file, Exiting!", fg='magenta') + sys.exit(1) + + client, _ = flush_configdb(ns) + + if asic_load_sysinfo: + if ns is DEFAULT_NAMESPACE: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(ns), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) + + if ns is DEFAULT_NAMESPACE: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True) + else: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True, namespace=ns) + + config_db.connect(False) + sonic_cfggen.FormatConverter.to_deserialized(asic_config) + data = sonic_cfggen.FormatConverter.output_to_db(asic_config) + config_db.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) + client.set(config_db.INIT_INDICATOR, 1) + + migrate_db_to_lastest(ns) + + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -1241,7 +1438,8 @@ def config(ctx): @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @click.argument('filename', required=False) -def save(filename): +@clicommon.pass_db +def save(db, filename): """Export current config DB to a file on disk.\n : Names of configuration file(s) to save, separated by comma with no spaces in between """ @@ -1256,7 +1454,13 @@ def save(filename): if filename is not None: cfg_files = filename.split(',') - if len(cfg_files) != num_cfg_file: + # If only one filename is provided in multi-ASIC mode, + # save all ASIC configurations 
to that single file. + if len(cfg_files) == 1 and multi_asic.is_multi_asic(): + filename = cfg_files[0] + multiasic_save_to_singlefile(db, filename) + return + elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return @@ -1362,11 +1566,12 @@ def print_dry_run_message(dry_run): help='format of config of the patch is either ConfigDb(ABNF) or SonicYang', show_default=True) @click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state') +@click.option('-p', '--parallel', is_flag=True, default=False, help='apply the changes to all ASICs in parallel') @click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True) @click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True) @click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') @click.pass_context -def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose): +def apply_patch(ctx, patch_file_path, format, dry_run, parallel, ignore_non_yang_tables, ignore_path, verbose): """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902. This command can be used do partial updates to the config with minimum disruption to running processes. It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF) @@ -1381,6 +1586,9 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i patch_as_json = json.loads(text) patch = jsonpatch.JsonPatch(patch_as_json) + if not validate_patch(patch): + raise GenericConfigUpdaterError(f"Failed validating patch: {patch}") + results = {} config_format = ConfigFormat[format.upper()] # Initialize a dictionary to hold changes categorized by scope @@ -1403,20 +1611,39 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i # Empty case to force validate YANG model. 
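+ # (an empty patch yields no per-scope changes, so each scope is seeded with an empty change list below to still exercise YANG validation)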
if not changes_by_scope: asic_list = [multi_asic.DEFAULT_NAMESPACE] - asic_list.extend(multi_asic.get_namespace_list()) + if multi_asic.is_multi_asic(): + asic_list.extend(multi_asic.get_namespace_list()) for asic in asic_list: changes_by_scope[asic] = [] # Apply changes for each scope - for scope_changes in changes_by_scope.items(): - apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + if parallel: + with concurrent.futures.ThreadPoolExecutor() as executor: + # Prepare the argument tuples + arguments = [(scope_changes, results, config_format, + verbose, dry_run, ignore_non_yang_tables, ignore_path) + for scope_changes in changes_by_scope.items()] + + # Submit all tasks and wait for them to complete + futures = [executor.submit(apply_patch_wrapper, args) for args in arguments] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + else: + for scope_changes in changes_by_scope.items(): + apply_patch_for_scope(scope_changes, + results, + config_format, + verbose, dry_run, + ignore_non_yang_tables, + ignore_path) # Check if any updates failed failures = [scope for scope, result in results.items() if not result['success']] if failures: failure_messages = '\n'.join([f"- {failed_scope}: {results[failed_scope]['message']}" for failed_scope in failures]) - raise Exception(f"Failed to apply patch on the following scopes:\n{failure_messages}") + raise GenericConfigUpdaterError(f"Failed to apply patch on the following scopes:\n{failure_messages}") log.log_notice(f"Patch applied successfully for {patch}.") click.secho("Patch applied successfully.", fg="cyan", underline=True) @@ -1533,9 +1760,11 @@ def list_checkpoints(ctx, verbose): @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @click.option('-f', '--force', default=False, is_flag=True, help='Force config reload without system checks') @click.option('-t', '--file_format', default='config_db',type=click.Choice(['config_yang', 'config_db']),show_default=True,help='specify the file format') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do reload without acquiring lock') @click.argument('filename', required=False) @clicommon.pass_db -def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format, bypass_lock): """Clear current configuration and import a previous saved config DB dump file. : Names of configuration file(s) to load, separated by comma with no spaces in between """ @@ -1568,11 +1797,15 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if multi_asic.is_multi_asic() and file_format == 'config_db': num_cfg_file += num_asic + multiasic_single_file_mode = False # If the user give the filename[s], extract the file names. 
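+ # (in multi-ASIC single-file mode, detected below, one combined file stands in for the per-namespace config files)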
if filename is not None: cfg_files = filename.split(',') - if len(cfg_files) != num_cfg_file: + if len(cfg_files) == 1 and multi_asic.is_multi_asic(): + multiasic_validate_single_file(cfg_files[0]) + multiasic_single_file_mode = True + elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return @@ -1581,127 +1814,109 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form log.log_notice("'reload' stopping services...") _stop_services() - # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB - # service running in the host + DB services running in each ASIC namespace created per ASIC. - # In the below logic, we get all namespaces in this platform and add an empty namespace '' - # denoting the current namespace which we are in ( the linux host ) - for inst in range(-1, num_cfg_file-1): - # Get the namespace name, for linux host it is None - if inst == -1: - namespace = None - else: - namespace = "{}{}".format(NAMESPACE_PREFIX, inst) - - # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json - if cfg_files: - file = cfg_files[inst+1] - # Save to tmpfile in case of stdin input which can only be read once - if file == "/dev/stdin": - file_input = read_json_file(file) - (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") - write_json_file(file_input, tmpfname) - file = tmpfname - else: - if file_format == 'config_db': - if namespace is None: - file = DEFAULT_CONFIG_DB_FILE - else: - file = "/etc/sonic/config_db{}.json".format(inst) + if multiasic_single_file_mode: + multiasic_write_to_db(cfg_files[0], load_sysinfo) + else: + # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB + # service running in the host + DB services running in each ASIC namespace created per ASIC. + # In the below logic, we get all namespaces in this platform and add an empty namespace '' + # denoting the current namespace which we are in ( the linux host ) + for inst in range(-1, num_cfg_file-1): + # Get the namespace name, for linux host it is DEFAULT_NAMESPACE + if inst == -1: + namespace = DEFAULT_NAMESPACE else: - file = DEFAULT_CONFIG_YANG_FILE - - - # Check the file exists before proceeding. - if not os.path.exists(file): - click.echo("The config file {} doesn't exist".format(file)) - continue + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) + + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + # Save to tmpfile in case of stdin input which can only be read once + if file == "/dev/stdin": + file_input = read_json_file(file) + (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") + write_json_file(file_input, tmpfname) + file = tmpfname + else: + if file_format == 'config_db': + if namespace is DEFAULT_NAMESPACE: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) + else: + file = DEFAULT_CONFIG_YANG_FILE - if file_format == 'config_db': - file_input = read_json_file(file) + # Check the file exists before proceeding. 
+ if not os.path.exists(file): + click.echo("The config file {} doesn't exist".format(file)) + continue - platform = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("platform") - mac = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("mac") + if file_format == 'config_db': + file_input = read_json_file(file) + if not load_sysinfo: + load_sysinfo = load_sysinfo_if_missing(file_input) + + if load_sysinfo: + try: + command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + + except FileNotFoundError as e: + click.echo("{}".format(str(e)), err=True) + raise click.Abort() + except Exception as e: + click.echo("{}\n{}".format(type(e), str(e)), err=True) + raise click.Abort() + + if not output: + click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') + sys.exit(1) - if not platform or not mac: - log.log_warning("Input file does't have platform or mac. platform: {}, mac: {}" - .format(None if platform is None else platform, None if mac is None else mac)) - load_sysinfo = True + cfg_hwsku = output.strip() - if load_sysinfo: - try: - command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] - proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) - output, err = proc.communicate() + client, config_db = flush_configdb(namespace) - except FileNotFoundError as e: - click.echo("{}".format(str(e)), err=True) - raise click.Abort() - except Exception as e: - click.echo("{}\n{}".format(type(e), str(e)), err=True) - raise click.Abort() + if load_sysinfo: + if namespace is DEFAULT_NAMESPACE: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) - if not output: - click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') - sys.exit(1) + # For the database service running in linux host we use the file user gives as input + # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, + # the default config_db.json format is used. - cfg_hwsku = output.strip() + config_gen_opts = [] - if namespace is None: - config_db = ConfigDBConnector() - else: - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + if os.path.isfile(INIT_CFG_FILE): + config_gen_opts += ['-j', str(INIT_CFG_FILE)] - config_db.connect() - client = config_db.get_redis_client(config_db.CONFIG_DB) - client.flushdb() - - if load_sysinfo: - if namespace is None: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + if file_format == 'config_db': + config_gen_opts += ['-j', str(file)] else: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] - clicommon.run_command(command, display_cmd=True) - - # For the database service running in linux host we use the file user gives as input - # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, - # the default config_db.json format is used. 
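+ # (config_gen_opts collects the sonic-cfggen arguments: '-j' for JSON input files, '-Y' for a YANG input file, '-n' for a namespace)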
- + config_gen_opts += ['-Y', str(file)] - config_gen_opts = [] + if namespace is not DEFAULT_NAMESPACE: + config_gen_opts += ['-n', str(namespace)] - if os.path.isfile(INIT_CFG_FILE): - config_gen_opts += ['-j', str(INIT_CFG_FILE)] + command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - if file_format == 'config_db': - config_gen_opts += ['-j', str(file)] - else: - config_gen_opts += ['-Y', str(file)] - - if namespace is not None: - config_gen_opts += ['-n', str(namespace)] - - command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - - clicommon.run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, 1) + clicommon.run_command(command, display_cmd=True) + client.set(config_db.INIT_INDICATOR, 1) - if os.path.exists(file) and file.endswith("_configReloadStdin"): - # Remove tmpfile - try: - os.remove(file) - except OSError as e: - click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) + if os.path.exists(file) and file.endswith("_configReloadStdin"): + # Remove tmpfile + try: + os.remove(file) + except OSError as e: + click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) - # Migrate DB contents to latest version - db_migrator='/usr/local/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - if namespace is None: - command = [db_migrator, '-o', 'migrate'] - else: - command = [db_migrator, '-o', 'migrate', '-n', str(namespace)] - clicommon.run_command(command, display_cmd=True) + # Migrate DB contents to latest version + migrate_db_to_lastest(namespace) # Re-generate the environment variable in case config_db.json was edited update_sonic_environment() @@ -1762,8 +1977,10 @@ def load_mgmt_config(filename): @click.option('-t', '--traffic_shift_away', default=False, is_flag=True, help='Keep device in maintenance with TSA') @click.option('-o', '--override_config', default=False, is_flag=True, help='Enable config override. Proceed with default path.') @click.option('-p', '--golden_config_path', help='Provide golden config path to override. 
Use with --override_config') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do load minigraph without acquiring lock') @clicommon.pass_db -def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path, bypass_lock): """Reconfigure based on minigraph.""" argv_str = ' '.join(['config', *sys.argv[1:]]) log.log_notice(f"'load_minigraph' executing with command: {argv_str}") @@ -1777,6 +1994,14 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, fg='magenta') raise click.Abort() + # Dependency check golden config json + config_to_check = read_json_file(golden_config_path) + if multi_asic.is_multi_asic(): + host_config = config_to_check.get('localhost', {}) + else: + host_config = config_to_check + table_hard_dependency_check(host_config) + #Stop services before config push if not no_service_restart: log.log_notice("'load_minigraph' stopping services...") @@ -1995,8 +2220,8 @@ def override_config_table(db, input_config_db, dry_run): if multi_asic.is_multi_asic() and len(config_input): # Golden Config will use "localhost" to represent host name if ns == DEFAULT_NAMESPACE: - if "localhost" in config_input.keys(): - ns_config_input = config_input["localhost"] + if HOST_NAMESPACE in config_input.keys(): + ns_config_input = config_input[HOST_NAMESPACE] else: click.secho("Wrong config format! 'localhost' not found in host config! cannot override.. abort") sys.exit(1) @@ -2151,18 +2376,6 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) -# -# 'suppress-fib-pending' command ('config suppress-fib-pending ...') -# -@config.command('suppress-fib-pending') -@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) -@clicommon.pass_db -def suppress_pending_fib(db, state): - ''' Enable or disable pending FIB suppression. Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' - - config_db = db.cfgdb - config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) - # # 'yang_config_validation' command ('config yang_config_validation ...') # @@ -3296,7 +3509,10 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|| - if not clicommon.is_ipaddress(agentip): + # Link local IP address should be provided along with zone id + # % for ex fe80::1%eth0 + agent_ip_addr = agentip.split('%')[0] + if not clicommon.is_ipaddress(agent_ip_addr): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] @@ -3306,7 +3522,7 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): click.echo("ManagementVRF is Enabled. 
Provide vrf.") return False found = 0 - ip = ipaddress.ip_address(agentip) + ip = ipaddress.ip_address(agent_ip_addr) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: @@ -3984,6 +4200,11 @@ def bgp(): """BGP-related configuration tasks""" pass + + +# BGP module extensions +config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL) + # # 'shutdown' subgroup ('config bgp shutdown ...') # @@ -4519,7 +4740,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load except Exception as e: click.secho("Failed to break out Port. Error: {}".format(str(e)), fg='magenta') - sys.exit(0) + sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys @@ -4646,6 +4867,14 @@ def add_interface_ip(ctx, interface_name, ip_addr, gw, secondary): interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port mode + vlan_member_table = config_db.get_table('VLAN_MEMBER') + + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') @@ -6170,7 +6399,8 @@ def remove_reasons(counter_name, reasons, verbose): @click.option('-ydrop', metavar='', type=click.IntRange(0, 100), help="Set yellow drop probability") @click.option('-gdrop', metavar='', type=click.IntRange(0, 100), help="Set green drop probability") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") -def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose, namespace): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) command = ['ecnconfig', '-p', str(profile)] @@ -6184,6 +6414,8 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos if ydrop is not None: command += ['-ydrop', str(ydrop)] if gdrop is not None: command += ['-gdrop', str(gdrop)] if verbose: command += ["-vv"] + if namespace is not None: + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index accf944ce6..f61335d4f4 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. +# Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. 
# Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/consutil/lib.py b/consutil/lib.py index 1d7f967bd3..e597e3b643 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -277,7 +277,7 @@ def init_device_prefix(): @staticmethod def list_console_ttys(): """Lists all console tty devices""" - cmd = ["ls", SysInfoProvider.DEVICE_PREFIX + "*"] + cmd = ["bash", "-c", "ls " + SysInfoProvider.DEVICE_PREFIX + "*"] output, _ = SysInfoProvider.run_command(cmd, abort=False) ttys = output.split('\n') ttys = list([dev for dev in ttys if re.match(SysInfoProvider.DEVICE_PREFIX + r"\d+", dev) != None]) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index dee689b9b8..689ca23b73 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -47,6 +47,8 @@ * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) * [CMIS firmware target mode commands](#cmis-firmware-target-mode-commands) +* [CMIS debug](#cmis-debug) +* [CMIS debug loopback](#cmis-debug-loopback) * [DHCP Relay](#dhcp-relay) * [DHCP Relay show commands](#dhcp-relay-show-commands) * [DHCP Relay clear commands](#dhcp-relay-clear-commands) @@ -2610,24 +2612,24 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` -**show suppress-fib-pending** +**show bgp device-global** -This command is used to show the status of suppress pending FIB feature. -When enabled, BGP will not advertise routes which aren't yet offloaded. +This command displays BGP device global configuration. - Usage: - ``` - show suppress-fib-pending + ```bash + show bgp device-global ``` -- Examples: - ``` - admin@sonic:~$ show suppress-fib-pending - Enabled - ``` - ``` - admin@sonic:~$ show suppress-fib-pending - Disabled +- Options: + - _-j,--json_: display in JSON format + +- Example: + ```bash + admin@sonic:~$ show bgp device-global + TSA W-ECMP + ------- ------- + enabled enabled ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) @@ -2722,22 +2724,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` -**config suppress-fib-pending** +**config bgp device-global tsa/w-ecmp** + +This command is used to manage BGP device global configuration. -This command is used to enable or disable announcements of routes not yet installed in the HW. -Once enabled, BGP will not advertise routes which aren't yet offloaded. +Feature list: +1. TSA - Traffic-Shift-Away +2. W-ECMP - Weighted-Cost Multi-Path - Usage: - ``` - config suppress-fib-pending + ```bash + config bgp device-global tsa + config bgp device-global w-ecmp ``` - Examples: - ``` - admin@sonic:~$ sudo config suppress-fib-pending enabled - ``` - ``` - admin@sonic:~$ sudo config suppress-fib-pending disabled + ```bash + admin@sonic:~$ config bgp device-global tsa enabled + admin@sonic:~$ config bgp device-global w-ecmp enabled ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) @@ -3092,6 +3096,31 @@ Example of the module supporting target mode Target Mode set to 1 ``` +## CMIS debug + +### CMIS debug loopback + +This command is the standard CMIS diagnostic control used for troubleshooting link and performance issues between the host switch and transceiver module. 
+ +**sfputil debug loopback** + +- Usage: + ``` + sfputil debug loopback PORT_NAME LOOPBACK_MODE + + Set the loopback mode + host-side-input: host side input loopback mode + host-side-output: host side output loopback mode + media-side-input: media side input loopback mode + media-side-output: media side output loopback mode + none: disable loopback mode + ``` + +- Example: + ``` + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + ``` + ## DHCP Relay ### DHCP Relay show commands diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py index 32a356bf9a..8d8d23f87a 100644 --- a/generic_config_updater/change_applier.py +++ b/generic_config_updater/change_applier.py @@ -16,6 +16,7 @@ print_to_console = False + def set_verbose(verbose=False): global print_to_console, logger @@ -34,11 +35,12 @@ def log_error(m): logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console) -def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE): - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) +def get_config_db(scope=multi_asic.DEFAULT_NAMESPACE): + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=scope) config_db.connect() return config_db + def set_config(config_db, tbl, key, data): config_db.set_entry(tbl, key, data) @@ -61,11 +63,9 @@ class DryRunChangeApplier: def __init__(self, config_wrapper): self.config_wrapper = config_wrapper - def apply(self, change): self.config_wrapper.apply_change_to_config_db(change) - def remove_backend_tables_from_config(self, data): return data @@ -74,9 +74,9 @@ class ChangeApplier: updater_conf = None - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace - self.config_db = get_config_db(self.namespace) + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope + self.config_db = get_config_db(self.scope) self.backend_tables = [ "BUFFER_PG", "BUFFER_PROFILE", @@ -86,7 +86,6 @@ def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): with open(UPDATER_CONF_FILE, "r") as s: ChangeApplier.updater_conf = json.load(s) - def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys): # cmd is in the format as . 
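+ # e.g. "some.module.path.method" -- the last dotted component names the method that is resolved and invoked below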
# @@ -98,7 +97,6 @@ def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys): return method_to_call(old_cfg, upd_cfg, keys) - def _services_validate(self, old_cfg, upd_cfg, keys): lst_svcs = set() lst_cmds = set() @@ -124,7 +122,6 @@ def _services_validate(self, old_cfg, upd_cfg, keys): log_debug("service invoked: {}".format(cmd)) return 0 - def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): for key in set(run_tbl.keys()).union(set(upd_tbl.keys())): run_data = run_tbl.get(key, None) @@ -135,20 +132,17 @@ def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): upd_keys[tbl][key] = {} log_debug("Patch affected tbl={} key={}".format(tbl, key)) - def _report_mismatch(self, run_data, upd_data): log_error("run_data vs expected_data: {}".format( str(jsondiff.diff(run_data, upd_data))[0:40])) - def apply(self, change): run_data = self._get_running_config() upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data))) upd_keys = defaultdict(dict) for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))): - self._upd_data(tbl, run_data.get(tbl, {}), - upd_data.get(tbl, {}), upd_keys) + self._upd_data(tbl, run_data.get(tbl, {}), upd_data.get(tbl, {}), upd_keys) ret = self._services_validate(run_data, upd_data, upd_keys) if not ret: @@ -168,9 +162,9 @@ def remove_backend_tables_from_config(self, data): def _get_running_config(self): _, fname = tempfile.mkstemp(suffix="_changeApplier") - - if self.namespace: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + + if self.scope: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] else: cmd = ['sonic-cfggen', '-d', '--print-data'] @@ -181,7 +175,9 @@ def _get_running_config(self): return_code = result.returncode if return_code: os.remove(fname) - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + raise GenericConfigUpdaterError( + f"Failed to get running config for scope: {self.scope}," + + f"Return code: {return_code}, Error: {err}") run_data = {} try: diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 68e49b6c03..a379e7282f 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,9 +20,9 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", - "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "ACS-SN5400" ] + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", 
"Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", + "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index b75939749c..8ce27455bb 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,43 +1,74 @@ import json import jsonpointer import os +import subprocess + from enum import Enum -from .gu_common import GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ - DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging +from .gu_common import HOST_NAMESPACE, GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ + DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ - TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter + TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier from sonic_py_common import multi_asic CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" + def extract_scope(path): if not path: raise Exception("Wrong patch with empty path.") - - try: - pointer = jsonpointer.JsonPointer(path) - parts = pointer.parts - except Exception as e: - raise Exception(f"Error resolving path: '{path}' due to {e}") - + pointer = jsonpointer.JsonPointer(path) + parts = pointer.parts if not parts: - raise Exception("Wrong patch with empty path.") + raise GenericConfigUpdaterError("Wrong patch with empty path.") if parts[0].startswith("asic"): if not parts[0][len("asic"):].isnumeric(): - raise Exception(f"Error resolving path: '{path}' due to incorrect ASIC number.") + raise GenericConfigUpdaterError(f"Error resolving path: '{path}' due to incorrect ASIC number.") scope = parts[0] remainder = "/" + "/".join(parts[1:]) - elif parts[0] == "localhost": - scope = "localhost" + elif parts[0] == HOST_NAMESPACE: + scope = HOST_NAMESPACE remainder = "/" + "/".join(parts[1:]) else: + if multi_asic.is_multi_asic(): + raise GenericConfigUpdaterError(f"Multi ASIC must have namespace prefix in path: '{path}'.") + scope = "" remainder = path - return scope, remainder + +def get_cmd_output(cmd): + proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) + return proc.communicate()[0], proc.returncode + + +def get_config_json(): + scope_list = [multi_asic.DEFAULT_NAMESPACE] + all_running_config = {} + if multi_asic.is_multi_asic(): + scope_list.extend(multi_asic.get_namespace_list()) + for scope in scope_list: + command = ["sonic-cfggen", "-d", "--print-data"] + if scope != multi_asic.DEFAULT_NAMESPACE: + command += ["-n", scope] + + running_config_text, returncode = get_cmd_output(command) + if returncode: + raise GenericConfigUpdaterError( + f"Fetch all runningconfiguration failed as output:{running_config_text}") + running_config = json.loads(running_config_text) + + if multi_asic.is_multi_asic(): + if scope == multi_asic.DEFAULT_NAMESPACE: + scope = HOST_NAMESPACE + all_running_config[scope] = running_config + else: + all_running_config = 
running_config + return all_running_config + + class ConfigLock: def acquire_lock(self): # TODO: Implement ConfigLock @@ -52,22 +83,23 @@ class ConfigFormat(Enum): CONFIGDB = 1 SONICYANG = 2 + class PatchApplier: def __init__(self, patchsorter=None, changeapplier=None, config_wrapper=None, patch_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) - self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace) + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(scope=self.scope) def apply(self, patch, sort=True): - scope = self.namespace if self.namespace else 'localhost' + scope = self.scope if self.scope else HOST_NAMESPACE self.logger.log_notice(f"{scope}: Patch application starting.") self.logger.log_notice(f"{scope}: Patch: {patch}") @@ -84,15 +116,14 @@ def apply(self, patch, sort=True): self.config_wrapper.validate_field_operation(old_config, target_config) # Validate target config does not have empty tables since they do not show up in ConfigDb - self.logger.log_notice(f"{scope}: alidating target config does not have empty tables, " \ - "since they do not show up in ConfigDb.") + self.logger.log_notice(f"""{scope}: validating target config does not have empty tables, + since they do not show up in ConfigDb.""") empty_tables = self.config_wrapper.get_empty_tables(target_config) - if empty_tables: # if there are empty tables + if empty_tables: # if there are empty tables empty_tables_txt = ", ".join(empty_tables) - raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables " \ - "which is not allowed in ConfigDb. " \ - f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") - + raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables \ + which is not allowed in ConfigDb. 
\ + Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply if sort: self.logger.log_notice(f"{scope}: sorting patch updates.") @@ -105,9 +136,6 @@ def apply(self, patch, sort=True): self.logger.log_notice(f"The {scope} patch was converted into {changes_len} " \ f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") - for change in changes: - self.logger.log_notice(f" * {change}") - # Apply changes in order self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \ f"in order{':' if changes_len > 0 else '.'}") @@ -120,19 +148,19 @@ def apply(self, patch, sort=True): new_config = self.config_wrapper.get_config_db_as_json() self.changeapplier.remove_backend_tables_from_config(target_config) self.changeapplier.remove_backend_tables_from_config(new_config) - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated") self.logger.log_notice(f"{scope} patch application completed.") class ConfigReplacer: - def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True) - self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) def replace(self, target_config): self.logger.log_notice("Config replacement starting.") @@ -150,7 +178,7 @@ def replace(self, target_config): self.logger.log_notice("Verifying config replacement is reflected on ConfigDB.") new_config = self.config_wrapper.get_config_db_as_json() - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"After replacing config, there is still some parts not updated") self.logger.log_notice("Config replacement completed.") @@ -161,23 +189,24 @@ def __init__(self, checkpoints_dir=CHECKPOINTS_DIR, config_replacer=None, config_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True) + self.util = Util(checkpoints_dir=checkpoints_dir) self.checkpoints_dir = checkpoints_dir - self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.config_replacer = 
config_replacer if config_replacer is not None else ConfigReplacer(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) def rollback(self, checkpoint_name): self.logger.log_notice("Config rollbacking starting.") self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Loading checkpoint into memory.") - target_config = self._get_checkpoint_content(checkpoint_name) + target_config = self.util.get_checkpoint_content(checkpoint_name) self.logger.log_notice(f"Replacing config using 'Config Replacer'.") self.config_replacer.replace(target_config) @@ -189,16 +218,16 @@ def checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice("Getting current config db.") - json_content = self.config_wrapper.get_config_db_as_json() + json_content = get_config_json() self.logger.log_notice("Getting checkpoint full-path.") - path = self._get_checkpoint_full_path(checkpoint_name) + path = self.util.get_checkpoint_full_path(checkpoint_name) self.logger.log_notice("Ensuring checkpoint directory exist.") - self._ensure_checkpoints_dir_exists() + self.util.ensure_checkpoints_dir_exists() self.logger.log_notice(f"Saving config db content to {path}.") - self._save_json_file(path, json_content) + self.util.save_json_file(path, json_content) self.logger.log_notice("Config checkpoint completed.") @@ -206,12 +235,12 @@ def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") - if not self._checkpoints_dir_exist(): + if not self.util.checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") return [] self.logger.log_info("Getting checkpoints in checkpoints directory.") - checkpoint_names = self._get_checkpoint_names() + checkpoint_names = self.util.get_checkpoint_names() checkpoints_len = len(checkpoint_names) self.logger.log_info(f"Found {checkpoints_len} checkpoint{'s' if checkpoints_len != 1 else ''}{':' if checkpoints_len > 0 else '.'}") @@ -227,59 +256,139 @@ def delete_checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Checking checkpoint exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Deleting checkpoint.") - self._delete_checkpoint(checkpoint_name) + self.util.delete_checkpoint(checkpoint_name) self.logger.log_notice("Deleting checkpoint completed.") - def _ensure_checkpoints_dir_exists(self): + +class MultiASICConfigReplacer(ConfigReplacer): + def __init__(self, + patch_applier=None, + config_wrapper=None, + patch_wrapper=None, + scope=multi_asic.DEFAULT_NAMESPACE): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigReplacer", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + super().__init__(patch_applier, config_wrapper, patch_wrapper, scope) + + def replace(self, target_config): + config_keys = set(target_config.keys()) + 
missing_scopes = set(self.scopelist) - config_keys
+        if missing_scopes:
+            raise GenericConfigUpdaterError(f"Config to be replaced is missing scope: {missing_scopes}")
+
+        for scope in self.scopelist:
+            scope_config = target_config.pop(scope)
+            if scope.lower() == HOST_NAMESPACE:
+                scope = multi_asic.DEFAULT_NAMESPACE
+            ConfigReplacer(scope=scope).replace(scope_config)
+
+
+class MultiASICConfigRollbacker(FileSystemConfigRollbacker):
+    def __init__(self,
+                 checkpoints_dir=CHECKPOINTS_DIR,
+                 config_replacer=None,
+                 config_wrapper=None):
+        self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigRollbacker",
+                                                       print_all_to_console=True)
+        self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()]
+        self.checkpoints_dir = checkpoints_dir
+        self.util = Util(checkpoints_dir=checkpoints_dir)
+        super().__init__(config_wrapper=config_wrapper, config_replacer=config_replacer)
+
+    def rollback(self, checkpoint_name):
+        self.logger.log_notice("Config rollback starting.")
+        self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.")
+        self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.")
+
+        if not self.util.check_checkpoint_exists(checkpoint_name):
+            raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist")
+
+        self.logger.log_notice(f"Loading checkpoint '{checkpoint_name}' into memory.")
+        target_config = self.util.get_checkpoint_content(checkpoint_name)
+        self.logger.log_notice(f"Replacing config '{checkpoint_name}' using 'Config Replacer'.")
+
+        for scope in self.scopelist:
+            config = target_config.pop(scope)
+            if scope.lower() == HOST_NAMESPACE:
+                scope = multi_asic.DEFAULT_NAMESPACE
+            ConfigReplacer(scope=scope).replace(config)
+
+        self.logger.log_notice("Config rollback completed.")
+
+    def checkpoint(self, checkpoint_name):
+        all_configs = get_config_json()
+        self.logger.log_notice("Config checkpoint starting.")
+        self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.")
+
+        self.logger.log_notice("Getting checkpoint full-path.")
+        path = self.util.get_checkpoint_full_path(checkpoint_name)
+
+        self.logger.log_notice("Ensuring checkpoint directory exists.")
+        self.util.ensure_checkpoints_dir_exists()
+
+        self.logger.log_notice(f"Saving config db content to {path}.")
+        self.util.save_json_file(path, all_configs)
+
+        self.logger.log_notice("Config checkpoint completed.")
+
+
+class Util:
+    def __init__(self, checkpoints_dir=CHECKPOINTS_DIR):
+        self.checkpoints_dir = checkpoints_dir
+
+    def ensure_checkpoints_dir_exists(self):
         os.makedirs(self.checkpoints_dir, exist_ok=True)

-    def _save_json_file(self, path, json_content):
+    def save_json_file(self, path, json_content):
         with open(path, "w") as fh:
             fh.write(json.dumps(json_content))

-    def _get_checkpoint_content(self, checkpoint_name):
-        path = self._get_checkpoint_full_path(checkpoint_name)
+    def get_checkpoint_content(self, checkpoint_name):
+        path = self.get_checkpoint_full_path(checkpoint_name)
         with open(path) as fh:
             text = fh.read()
             return json.loads(text)

-    def _get_checkpoint_full_path(self, name):
+    def get_checkpoint_full_path(self, name):
         return os.path.join(self.checkpoints_dir, f"{name}{CHECKPOINT_EXT}")

-    def _get_checkpoint_names(self):
+    def get_checkpoint_names(self):
         file_names = []
         for file_name in os.listdir(self.checkpoints_dir):
             if file_name.endswith(CHECKPOINT_EXT):
                 # Remove extension from file name.
# Example assuming ext is '.cp.json', then 'checkpoint1.cp.json' becomes 'checkpoint1' file_names.append(file_name[:-len(CHECKPOINT_EXT)]) - return file_names - def _checkpoints_dir_exist(self): + def checkpoints_dir_exist(self): return os.path.isdir(self.checkpoints_dir) - def _check_checkpoint_exists(self, name): - path = self._get_checkpoint_full_path(name) + def check_checkpoint_exists(self, name): + path = self.get_checkpoint_full_path(name) return os.path.isfile(path) - def _delete_checkpoint(self, name): - path = self._get_checkpoint_full_path(name) + def delete_checkpoint(self, name): + path = self.get_checkpoint_full_path(name) return os.remove(path) class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, + decorated_patch_applier=None, + decorated_config_replacer=None, + decorated_config_rollbacker=None, + scope=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self, namespace=namespace) - ConfigReplacer.__init__(self, namespace=namespace) - FileSystemConfigRollbacker.__init__(self, namespace=namespace) - + PatchApplier.__init__(self, scope=scope) + ConfigReplacer.__init__(self, scope=scope) + FileSystemConfigRollbacker.__init__(self, scope=scope) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer self.decorated_config_rollbacker = decorated_config_rollbacker @@ -304,10 +413,14 @@ def delete_checkpoint(self, checkpoint_name): class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) - - self.namespace = namespace + def __init__(self, + patch_wrapper, + config_wrapper, + decorated_patch_applier=None, + decorated_config_replacer=None, + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, scope=scope) + self.scope = scope self.patch_wrapper = patch_wrapper self.config_wrapper = config_wrapper @@ -326,9 +439,12 @@ def __init__(self, decorated_config_replacer=None, decorated_config_rollbacker=None, config_lock=ConfigLock(), - namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) - + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, + decorated_patch_applier, + decorated_config_replacer, + decorated_config_rollbacker, + scope=scope) self.config_lock = config_lock def apply(self, patch, sort=True): @@ -350,20 +466,20 @@ def execute_write_action(self, action, *args): class GenericUpdateFactory: - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = 
self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) if config_format == ConfigFormat.CONFIGDB: pass @@ -371,62 +487,75 @@ def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_ patch_applier = SonicYangDecorator(decorated_patch_applier=patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, scope=self.scope) return patch_applier def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper) + else: + config_replacer = ConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper, + scope=self.scope) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, namespace=self.namespace) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, scope=self.scope) return config_replacer def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_tables=False, ignore_paths=[]): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) - - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) 
+ scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier) + config_rollbacker = MultiASICConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer) + else: + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier, + scope=self.scope) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer, + scope=self.scope) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, scope=self.scope) return config_rollbacker @@ -435,15 +564,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper(namespace=self.namespace) + return DryRunConfigWrapper(scope=self.scope) else: - return ConfigWrapper(namespace=self.namespace) + return ConfigWrapper(scope=self.scope) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier(namespace=self.namespace) + return ChangeApplier(scope=self.scope) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not ignore_non_yang_tables and not ignore_paths: @@ -462,9 +591,9 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, class GenericUpdater: - def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, generic_update_factory=None, scope=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(scope=scope) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 974c540c07..452bad1ee7 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -16,6 +16,8 @@ SYSLOG_IDENTIFIER = "GenericConfigUpdater" SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" +HOST_NAMESPACE = "localhost" + class GenericConfigUpdaterError(Exception): pass @@ -52,8 +54,8 @@ def __eq__(self, other): return False class ConfigWrapper: - def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None @@ -64,8 +66,8 @@ def get_config_db_as_json(self): return config_db_json def _get_config_db_as_text(self): - if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + if self.scope is not None and self.scope != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] else: cmd = ['sonic-cfggen', '-d', '--print-data'] @@ 
-73,7 +75,8 @@ def _get_config_db_as_text(self):
         text, err = result.communicate()
         return_code = result.returncode
         if return_code: # non-zero means failure
-            raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}")
+            raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.scope},"
+                                            f" Return code: {return_code}, Error: {err}")
         return text

     def get_sonic_yang_as_json(self):
@@ -236,7 +239,8 @@ def validate_lanes(self, config_db):
         for port in port_to_lanes_map:
             lanes = port_to_lanes_map[port]
             for lane in lanes:
-                if lane in existing:
+                # The default lane value is 0; it does not need duplicate-lane validation.
+                if lane in existing and lane != '0':
                     return False, f"'{lane}' lane is used multiple times in PORT: {set([port, existing[lane]])}"
                 existing[lane] = port
         return True, None
@@ -300,8 +304,8 @@ def create_sonic_yang_with_loaded_models(self):

 class DryRunConfigWrapper(ConfigWrapper):
     # This class will simulate all read/write operations to ConfigDB on a virtual storage unit.
-    def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE):
-        super().__init__(namespace=namespace)
+    def __init__(self, initial_imitated_config_db=None, scope=multi_asic.DEFAULT_NAMESPACE):
+        super().__init__(scope=scope)
         self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True)
         self.imitated_config_db = copy.deepcopy(initial_imitated_config_db)

@@ -321,9 +325,9 @@ def _init_imitated_config_db_if_none(self):

 class PatchWrapper:
-    def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.namespace)
+    def __init__(self, config_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
+        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.scope)
         self.path_addressing = PathAddressing(self.config_wrapper)

     def validate_config_db_patch_has_yang_models(self, patch):
diff --git a/pfc/main.py b/pfc/main.py
index b31d3c755e..f0b376e242 100644
--- a/pfc/main.py
+++ b/pfc/main.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-
 import click
 from swsscommon.swsscommon import ConfigDBConnector
 from tabulate import tabulate
@@ -8,153 +7,167 @@
 ALL_PRIORITIES = [str(x) for x in range(8)]
 PRIORITY_STATUS = ['on', 'off']

-def configPfcAsym(interface, pfc_asym):
-    """
-    PFC handler to configure asymmentric PFC.
-    """
-    configdb = ConfigDBConnector()
-    configdb.connect()
-    configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym})
+class Pfc(object):
+    def __init__(self, cfgdb=None):
+        self.cfgdb = cfgdb
+    def configPfcAsym(self, interface, pfc_asym):
+        """
+        PFC handler to configure asymmetric PFC.
+        """
+        configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb
+        configdb.connect()

-def showPfcAsym(interface):
-    """
-    PFC handler to display asymmetric PFC information.
-    """
-    header = ('Interface', 'Asymmetric')
+        configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym})

-    configdb = ConfigDBConnector()
-    configdb.connect()
+    def showPfcAsym(self, interface):
+        """
+        PFC handler to display asymmetric PFC information.
+ """ + header = ('Interface', 'Asymmetric') - if interface: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) - else: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() - table = [] - - for i in db_keys or [None]: - key = None - if i: - key = i.split('|')[-1] + if interface: + db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) + else: + db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') - if key and key.startswith('Ethernet'): - entry = configdb.get_entry('PORT', key) - table.append([key, entry.get('pfc_asym', 'N/A')]) + table = [] - sorted_table = natsorted(table) + for i in db_keys or [None]: + key = None + if i: + key = i.split('|')[-1] - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() + if key and key.startswith('Ethernet'): + entry = configdb.get_entry('PORT', key) + table.append([key, entry.get('pfc_asym', 'N/A')]) -def configPfcPrio(status, interface, priority): - configdb = ConfigDBConnector() - configdb.connect() + sorted_table = natsorted(table) - if interface not in configdb.get_keys('PORT_QOS_MAP'): - click.echo('Cannot find interface {0}'.format(interface)) - return + click.echo() + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() - """Current lossless priorities on the interface""" - entry = configdb.get_entry('PORT_QOS_MAP', interface) - enable_prio = entry.get('pfc_enable').split(',') - - """Avoid '' in enable_prio""" - enable_prio = [x.strip() for x in enable_prio if x.strip()] - - if status == 'on' and priority in enable_prio: - click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) - return - - if status == 'off' and priority not in enable_prio: - click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) - return - - if status == 'on': - enable_prio.append(priority) - - else: - enable_prio.remove(priority) - - enable_prio.sort() - configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) - - """Show the latest PFC configuration""" - showPfcPrio(interface) - -def showPfcPrio(interface): - """ - PFC handler to display PFC enabled priority information. 
- """ - header = ('Interface', 'Lossless priorities') - table = [] + def configPfcPrio(self, status, interface, priority): + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() + + if interface not in configdb.get_keys('PORT_QOS_MAP'): + click.echo('Cannot find interface {0}'.format(interface)) + return + + """Current lossless priorities on the interface""" + entry = configdb.get_entry('PORT_QOS_MAP', interface) + enable_prio = entry.get('pfc_enable').split(',') + + """Avoid '' in enable_prio""" + enable_prio = [x.strip() for x in enable_prio if x.strip()] + + if status == 'on' and priority in enable_prio: + click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) + return + + if status == 'off' and priority not in enable_prio: + click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) + return + + if status == 'on': + enable_prio.append(priority) + + else: + enable_prio.remove(priority) + + enable_prio.sort() + configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) + + """Show the latest PFC configuration""" + self.showPfcPrio(interface) - configdb = ConfigDBConnector() - configdb.connect() - - """Get all the interfaces with QoS map information""" - intfs = configdb.get_keys('PORT_QOS_MAP') - - """The user specifies an interface but we cannot find it""" - if interface and interface not in intfs: - click.echo('Cannot find interface {0}'.format(interface)) - return - - if interface: - intfs = [interface] - - for intf in intfs: - entry = configdb.get_entry('PORT_QOS_MAP', intf) - table.append([intf, entry.get('pfc_enable', 'N/A')]) - - sorted_table = natsorted(table) - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() + def showPfcPrio(self, interface): + """ + PFC handler to display PFC enabled priority information. + """ + header = ('Interface', 'Lossless priorities') + table = [] + + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() + + """Get all the interfaces with QoS map information""" + intfs = configdb.get_keys('PORT_QOS_MAP') + + """The user specifies an interface but we cannot find it""" + if interface and interface not in intfs: + click.echo('Cannot find interface {0}'.format(interface)) + return + + if interface: + intfs = [interface] + + for intf in intfs: + entry = configdb.get_entry('PORT_QOS_MAP', intf) + table.append([intf, entry.get('pfc_enable', 'N/A')]) + + sorted_table = natsorted(table) + click.echo() + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() @click.group() -def cli(): +@click.pass_context +def cli(ctx): """PFC Command Line""" - pass + # Use the cfgdb object if given as input. 
+ cfgdb = None if ctx.obj is None else ctx.obj.cfgdb + + ctx.obj = {'pfc': Pfc(cfgdb)} @cli.group() -def config(): +@click.pass_context +def config(ctx): """Config PFC""" pass @cli.group() -def show(): +@click.pass_context +def show(ctx): """Show PFC information""" pass @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) -def configAsym(status, interface): +@click.pass_context +def configAsym(ctx, status, interface): """Configure asymmetric PFC on a given port.""" - configPfcAsym(interface, status) + ctx.obj['pfc'].configPfcAsym(interface, status) @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) @click.argument('priority', type=click.Choice(ALL_PRIORITIES)) -def configPrio(status, interface, priority): +@click.pass_context +def configPrio(ctx, status, interface, priority): """Configure PFC on a given priority.""" - configPfcPrio(status, interface, priority) - + ctx.obj['pfc'].configPfcPrio(status, interface, priority) + @click.command() @click.argument('interface', type=click.STRING, required=False) -def showAsym(interface): +@click.pass_context +def showAsym(ctx, interface): """Show asymmetric PFC information""" - showPfcAsym(interface) + ctx.obj['pfc'].showPfcAsym(interface) @click.command() @click.argument('interface', type=click.STRING, required=False) -def showPrio(interface): +@click.pass_context +def showPrio(ctx, interface): """Show PFC priority information""" - showPfcPrio(interface) + ctx.obj['pfc'].showPfcPrio(interface) config.add_command(configAsym, "asymmetric") config.add_command(configPrio, "priority") diff --git a/rcli/linecard.py b/rcli/linecard.py index 73c13a73ef..f893428a42 100644 --- a/rcli/linecard.py +++ b/rcli/linecard.py @@ -8,7 +8,7 @@ import termios import tty -from .utils import get_linecard_ip +from .utils import get_linecard_ip, get_linecard_hostname_from_module_name, get_linecard_module_name_from_hostname from paramiko.py3compat import u from paramiko import Channel @@ -31,7 +31,17 @@ def __init__(self, linecard_name, username, password): if not self.ip: sys.exit(1) - self.linecard_name = linecard_name + # if the user passes linecard hostname, then try to get the module name for that linecard + module_name = get_linecard_module_name_from_hostname(linecard_name) + if module_name is None: + # if the module name cannot be found from host, assume the user has passed module name + self.module_name = linecard_name + self.hostname = get_linecard_hostname_from_module_name(linecard_name) + else: + # the user has passed linecard hostname + self.hostname = linecard_name + self.module_name = module_name + self.username = username self.password = password diff --git a/rcli/rexec.py b/rcli/rexec.py index 8831d5585f..21929c8012 100644 --- a/rcli/rexec.py +++ b/rcli/rexec.py @@ -30,20 +30,22 @@ def cli(linecard_names, command, username): if list(linecard_names) == ["all"]: # Get all linecard names using autocompletion helper - linecard_names = rcli_utils.get_all_linecards(None, None, "") + module_names = sorted(rcli_utils.get_all_linecards(None, None, "")) + else: + module_names = linecard_names linecards = [] # Iterate through each linecard, check if the login was successful - for linecard_name in linecard_names: - linecard = Linecard(linecard_name, username, password) + for module_name in module_names: + linecard = Linecard(module_name, username, password) if not linecard.connection: - click.echo(f"Failed to connect to 
{linecard_name} with username {username}")
+            click.echo(f"Failed to connect to {module_name} with username {username}")
             sys.exit(1)
         linecards.append(linecard)

     for linecard in linecards:
         if linecard.connection:
-            click.echo(f"======== {linecard.linecard_name} output: ========")
+            click.echo(f"======== {linecard.module_name}|{linecard.hostname} output: ========")
             click.echo(linecard.execute_cmd(command))

diff --git a/rcli/rshell.py b/rcli/rshell.py
index bac02d42d8..b22187a0f3 100644
--- a/rcli/rshell.py
+++ b/rcli/rshell.py
@@ -28,14 +28,14 @@ def cli(linecard_name, username):
     try:
         linecard = Linecard(linecard_name, username, password)
         if linecard.connection:
-            click.echo(f"Connecting to {linecard.linecard_name}")
+            click.echo(f"Connecting to {linecard.module_name}")
             # If connection was created, connection exists.
             # Otherwise, user will see an error message.
             linecard.start_shell()
             click.echo("Connection Closed")
     except paramiko.ssh_exception.AuthenticationException:
         click.echo(
-            f"Login failed on '{linecard.linecard_name}' with username '{linecard.username}'")
+            f"Login failed on '{linecard.module_name}' with username '{linecard.username}'")


 if __name__=="__main__":
diff --git a/rcli/utils.py b/rcli/utils.py
index 510e360581..7563eafdcd 100644
--- a/rcli/utils.py
+++ b/rcli/utils.py
@@ -1,7 +1,7 @@
 import click
-from getpass import getpass
+import getpass
 import os
-import sys
+import signal

 from swsscommon.swsscommon import SonicV2Connector

@@ -19,6 +19,8 @@
 CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE'
 CHASSIS_MODULE_HOSTNAME = 'module_hostname'

+GET_PASSWORD_TIMEOUT = 10
+
 def connect_to_chassis_state_db():
     chassis_state_db = SonicV2Connector(host="127.0.0.1")
     chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB)
@@ -43,6 +45,20 @@ def get_linecard_module_name_from_hostname(linecard_name: str):

     return None

+
+def get_linecard_hostname_from_module_name(linecard_name: str):
+
+    chassis_state_db = connect_to_chassis_state_db()
+    keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*'))
+    for key in keys:
+        module_name = key.split('|')[1]
+        if module_name.replace('-', '').lower() == linecard_name.replace('-', '').lower():
+            hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME)
+            return hostname
+
+    return None
+
+
 def get_linecard_ip(linecard_name: str):
     """
     Given a linecard name, lookup its IP address in the midplane table
@@ -69,6 +85,7 @@ def get_linecard_ip(linecard_name: str):
         return None

     return module_ip

+
 def get_module_ip_and_access_from_state_db(module_name):
     state_db = connect_state_db()
     data_dict = state_db.get_all(
@@ -136,8 +153,17 @@ def get_password(username=None):
     if username is None:
         username = os.getlogin()

-    return getpass(
+    def get_password_timeout(*args):
+        print("\nAborted! Timeout when waiting for password input.")
+        exit(1)
+
+    signal.signal(signal.SIGALRM, get_password_timeout)
+    signal.alarm(GET_PASSWORD_TIMEOUT)  # Set a timeout of GET_PASSWORD_TIMEOUT (10) seconds
+    password = getpass.getpass(
         "Password for username '{}': ".format(username),
         # Pass in click stdout stream - this is similar to using click.echo
         stream=click.get_text_stream('stdout')
     )
+    signal.alarm(0)  # Cancel the alarm
+
+    return password
diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index afd5e638de..9be3ce325b 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -58,7 +58,7 @@ def __init__(self, namespace, socket=None):
               none-zero values.
build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_202405_01' + self.CURRENT_VERSION = 'version_202411_01' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -1228,10 +1228,18 @@ def version_202311_03(self): def version_202405_01(self): """ - Version 202405_01, this version should be the final version for - master branch until 202405 branch is created. + Version 202405_01. """ log.log_info('Handling version_202405_01') + self.set_version('version_202411_01') + return 'version_202411_01' + + def version_202411_01(self): + """ + Version 202411_01, this version should be the final version for + master branch until 202411 branch is created. + """ + log.log_info('Handling version_202411_01') return None def get_version(self): diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 3d0b8d1db9..5812f38190 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -17,13 +17,15 @@ import sys import sonic_platform from sonic_platform_base.sonic_eeprom.eeprom_tlvinfo import TlvInfoDecoder -from sonic_py_common import device_info +from sonic_py_common import device_info, logger from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate EEPROM_INFO_TABLE = 'EEPROM_INFO' +SYSLOG_IDENTIFIER = 'decode-syseeprom' +log = logger.Logger(SYSLOG_IDENTIFIER) def instantiate_eeprom_object(): eeprom = None diff --git a/scripts/dropconfig b/scripts/dropconfig index 180c6166c6..1fc812a474 100755 --- a/scripts/dropconfig +++ b/scripts/dropconfig @@ -105,7 +105,7 @@ class DropConfig(object): if supported_reasons and int(capabilities.get('count', 0)) > 0: print('\n{}'.format(counter)) for reason in supported_reasons: - print('\t{}'.format(reason)) + print(' {}'.format(reason)) def create_counter(self, counter_name, alias, group, counter_type, description, reasons): diff --git a/scripts/dropstat b/scripts/dropstat index 4e9f5bb4d0..219ad2b494 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,8 +11,8 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries +import click import json -import argparse import os import socket import sys @@ -20,6 +20,9 @@ import sys from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -28,9 +31,14 @@ try: test_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, test_path) - import mock_tables.dbconnector + from tests.mock_tables import dbconnector socket.gethostname = lambda: 'sonic_drops_test' os.getuid = lambda: 27 + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + else: + dbconnector.load_database_config() except KeyError: pass @@ -43,6 +51,7 @@ DEBUG_COUNTER_PORT_STAT_MAP = 'COUNTERS_DEBUG_NAME_PORT_STAT_MAP' DEBUG_COUNTER_SWITCH_STAT_MAP = 'COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP' COUNTERS_PORT_NAME_MAP = 'COUNTERS_PORT_NAME_MAP' COUNTER_TABLE_PREFIX = 'COUNTERS:' +SWITCH_LEVEL_COUNTER_PREFIX = 'SWITCH_STD_DROP_COUNTER-' # ASIC_DB Tables ASIC_SWITCH_INFO_PREFIX = 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:' @@ -79,34 +88,43 @@ std_port_headers_map = { # Standard Switch-Level Headers std_switch_description_header = ['DEVICE'] 
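
(Aside, not part of the patch: the two additions below give the new switch-level standard drop counters their display metadata. A hypothetical sketch of how the map turns a raw SAI stat into a table column, mirroring the delta-against-checkpoint logic that `show_switch_std_drop_counts` applies further down; all counter values here are made up:)

```python
std_switch_drop_headers_map = {  # mirrors the definition below
    'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 'PKT_INTEGRITY_ERR'
}

current = {'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 12}    # live COUNTERS_DB read
checkpoint = {'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 5}  # saved by 'dropstat -c clear'

headers, row = ['SWITCH-ID'], ['0']
for stat, column in std_switch_drop_headers_map.items():
    headers.append(column)
    row.append(current.get(stat, 0) - checkpoint.get(stat, 0))  # drops since last clear

print(headers, row)  # ['SWITCH-ID', 'PKT_INTEGRITY_ERR'] ['0', 7]
```
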
+std_switch_dflt_drop_headers = ['SWITCH-ID']
+std_switch_drop_headers_map = {
+    'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 'PKT_INTEGRITY_ERR'
+}

 def get_dropstat_dir():
     return UserCache().get_directory()


 class DropStat(object):
-    def __init__(self):
-        self.config_db = ConfigDBConnector()
-        self.config_db.connect()
-
-        self.db = SonicV2Connector(use_unix_socket_path=False)
-        self.db.connect(self.db.COUNTERS_DB)
-        self.db.connect(self.db.ASIC_DB)
-        self.db.connect(self.db.APPL_DB)
+    def __init__(self, namespace):
+        self.namespaces = multi_asic.get_namespace_list(namespace)
+        self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace)
+        self.db = None
+        self.config_db = None
+        self.cached_namespace = None

         dropstat_dir = get_dropstat_dir()
         self.port_drop_stats_file = os.path.join(dropstat_dir, 'port-stats')
-        self.switch_drop_stats_file = os.path.join(dropstat_dir + 'switch-stats')
+        self.switch_drop_stats_file = os.path.join(dropstat_dir, 'switch-stats')
+        self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats')

         self.stat_lookup = {}
         self.reverse_stat_lookup = {}

+    @multi_asic_util.run_on_multi_asic
     def show_drop_counts(self, group, counter_type):
         """
             Prints out the current drop counts at the port-level and switch-level.
         """
+        if os.environ.get("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE", "0") == "1":
+            # Temp cache needs to be cleared to avoid interference from previous test cases
+            UserCache().remove()
+
+        self.show_switch_std_drop_counts(group, counter_type)
         self.show_port_drop_counts(group, counter_type)
         print('')
         self.show_switch_drop_counts(group, counter_type)
@@ -116,16 +134,91 @@ class DropStat(object):
             Clears the current drop counts.
         """

+        counters_port_drop = {}
+        counters_switch_drop = {}
+        counters_switch_std_drop = {}
+        for ns in self.namespaces:
+            self.config_db = multi_asic.connect_config_db_for_ns(ns)
+            self.db = multi_asic.connect_to_all_dbs_for_ns(ns)
+
+            counts = self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP)
+            if counts:
+                counters_port_drop.update(counts)
+
+            counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_drop.update(counts)
+
+            counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_std_drop.update(counts)
+
         try:
-            json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP),
-                      open(self.port_drop_stats_file, 'w+'))
-            json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()),
-                      open(self.switch_drop_stats_file, 'w+'))
+            if counters_port_drop:
+                json.dump(counters_port_drop, open(self.port_drop_stats_file, 'w+'))
+
+            if counters_switch_drop:
+                json.dump(counters_switch_drop, open(self.switch_drop_stats_file, 'w+'))
+
+            if counters_switch_std_drop:
+                json.dump(counters_switch_std_drop, open(self.switch_std_drop_stats_file, 'w+'))
         except IOError as e:
             print(e)
             sys.exit(e.errno)
         print("Cleared drop counters")

+    def show_switch_std_drop_counts(self, group, counter_type):
+        """
+            Prints out the standard drop counts (packet integrity drop etc) at the switch level, if such counts exist.
+ """ + + if group is not None or counter_type is not None: + return + + #Currently the switch drop counter (packet integrity) is supported only for chassis. + if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + #fake the switch_type for mock-test code coverage + switch_type = "voq" + else: + switch_type = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_type") + + if switch_type is None: + return + if switch_type != "fabric" and switch_type != "voq": + return + + switch_std_drop_ckpt = {} + + # Grab the latest clear checkpoint, if it exists + if os.path.isfile(self.switch_std_drop_stats_file): + switch_std_drop_ckpt = json.load(open(self.switch_std_drop_stats_file, 'r')) + + counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True) + if not counters: + return + switch_id = self.get_switch_id() + switch_std_stats = self.get_counts(counters, switch_id) + + if not switch_std_stats: + return + + if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + row = [socket.gethostname()] + else: + cfg_switch_id = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_id") + row = [cfg_switch_id] + + headers = std_switch_dflt_drop_headers + for cntr in counters: + if cntr in std_switch_drop_headers_map: + row.append(switch_std_stats.get(cntr, 0) - switch_std_drop_ckpt.get(cntr, 0)) + headers.append(std_switch_drop_headers_map[cntr]) + if row: + print(tabulate([row], headers, tablefmt='simple', stralign='right')) + print('') + def show_port_drop_counts(self, group, counter_type): """ Prints out the drop counts at the port level, if such counts exist. @@ -189,7 +282,7 @@ class DropStat(object): the group or not the right counter type. """ - configured_counters = self.get_configured_counters(object_stat_map) + configured_counters = self.get_configured_counters(object_stat_map, False) counters = std_counters + configured_counters return [ctr for ctr in counters if self.in_group(ctr, object_stat_map, group) and @@ -258,12 +351,13 @@ class DropStat(object): the given object type. """ + if self.cached_namespace != self.multi_asic.current_namespace: + self.stat_lookup = {} + self.cached_namespace = self.multi_asic.current_namespace + if not self.stat_lookup.get(object_stat_map, None): stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map) - if stats_map: - self.stat_lookup[object_stat_map] = stats_map - else: - self.stat_lookup[object_stat_map] = None + self.stat_lookup[object_stat_map] = stats_map if stats_map else None return self.stat_lookup[object_stat_map] @@ -282,7 +376,7 @@ class DropStat(object): return self.reverse_stat_lookup[object_stat_map] - def get_configured_counters(self, object_stat_map): + def get_configured_counters(self, object_stat_map, std_switch_cntr=False): """ Returns the list of counters that have been configured to track packet drops. 
@@ -294,6 +388,15 @@ class DropStat(object):
         if not counters:
             return configured_counters

+        # Switch-level standard drop counters are added by default to the DEBUG_COUNTER_SWITCH_STAT_MAP
+        # table, so they must be separated here from the user-configured counters.
+        if object_stat_map == DEBUG_COUNTER_SWITCH_STAT_MAP:
+            if std_switch_cntr:
+                new_cntrs = {k:counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX in k}
+            else:
+                new_cntrs = {k:counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX not in k}
+            return list(new_cntrs.values())
+
         return list(counters.values())

     def get_counter_name(self, object_stat_map, counter_stat):
@@ -385,39 +488,22 @@ class DropStat(object):
         else:
             return PORT_STATE_NA

-
-def main():
-    parser = argparse.ArgumentParser(description='Display drop counters',
-                                     formatter_class=argparse.RawTextHelpFormatter,
-                                     epilog="""
-Examples:
-    dropstat
-""")
-
-    # Version
-    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
-
-    # Actions
-    parser.add_argument('-c', '--command', type=str, help='Desired action to perform')
-
-    # Variables
-    parser.add_argument('-g', '--group', type=str, help='The group of the target drop counter', default=None)
-    parser.add_argument('-t', '--type', type=str, help='The type of the target drop counter', default=None)
-
-    args = parser.parse_args()
-
-    command = args.command
-
-    group = args.group
-    counter_type = args.type
-
-    dcstat = DropStat()
+@click.command(help='Display drop counters')
+@click.option('-c', '--command', required=True, help='Desired action to perform',
+              type=click.Choice(['clear', 'show'], case_sensitive=False))
+@click.option('-g', '--group', default=None, help='The group of the target drop counter')
+@click.option('-t', '--type', 'counter_type', default=None, help='The type of the target drop counter')
+@click.option('-n', '--namespace', help='Namespace name', default=None,
+              type=click.Choice(multi_asic.get_namespace_list()))
+@click.version_option(version='1.0')
+def main(command, group, counter_type, namespace):
+    load_db_config()
+
+    dcstat = DropStat(namespace)
     if command == 'clear':
         dcstat.clear_drop_counts()
-    elif command == 'show':
-        dcstat.show_drop_counts(group, counter_type)
     else:
-        print("Command not recognized")
+        dcstat.show_drop_counts(group, counter_type)


 if __name__ == '__main__':
diff --git a/scripts/ecnconfig b/scripts/ecnconfig
index e3b08d2bd3..9b2deab4dc 100755
--- a/scripts/ecnconfig
+++ b/scripts/ecnconfig
@@ -5,7 +5,7 @@ ecnconfig is the utility to

 1) show and change ECN configuration

-usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN]
+usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] [-n NAMESPACE]
                  [-gmax GREEN_MAX] [-ymin YELLOW_MIN] [-ymax YELLOW_MAX]
                  [-rmin RED_MIN] [-rmax RED_MAX] [-gdrop GREEN_DROP_PROB]
                  [-ydrop YELLOW_DROP_PROB] [-rdrop RED_DROP_PROB] [-vv]
@@ -16,6 +16,7 @@ optional arguments:
     -vv   --verbose       verbose output
     -l    --list          show ECN WRED configuration
     -p    --profile       specify WRED profile name
+    -n    --namespace     show ECN configuration for specified namespace
     -gmin --green-min     set min threshold for packets marked green
     -gmax --green-max     set max threshold for packets marked green
     -ymin --yellow-min    set min threshold for packets marked yellow
@@ -47,7 +48,7 @@
 $ecnconfig -q 3
 ECN status:
 queue 3: on
 """
-import argparse
+import click
 import json
 import os
 import sys
@@ -62,12 +63,17 @@ try:
         sys.path.insert(0, modules_path)
         sys.path.insert(0, tests_path)
         import mock_tables.dbconnector
-
+        if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == 
"multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util +from utilities_common.general import load_db_config WRED_PROFILE_TABLE_NAME = "WRED_PROFILE" WRED_CONFIG_FIELDS = { @@ -82,7 +88,6 @@ WRED_CONFIG_FIELDS = { "rdrop": "red_drop_probability" } -PORT_TABLE_NAME = "PORT" QUEUE_TABLE_NAME = "QUEUE" DEVICE_NEIGHBOR_TABLE_NAME = "DEVICE_NEIGHBOR" FIELD = "wred_profile" @@ -96,18 +101,25 @@ class EcnConfig(object): """ Process ecnconfig """ - def __init__(self, filename, verbose): + def __init__(self, test_filename, verbose, namespace): self.ports = [] self.queues = [] - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.num_wred_profiles = 0 - # Set up db connections - self.db = ConfigDBConnector() - self.db.connect() + # For unit testing + self.test_filename = test_filename + self.updated_profile_tables = {} + @multi_asic_util.run_on_multi_asic def list(self): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + List all WRED profiles. + """ + wred_profiles = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) for name, data in wred_profiles.items(): profile_name = name profile_data = data @@ -117,12 +129,18 @@ class EcnConfig(object): line = [field, value] config.append(line) print(tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(wred_profiles)) + self.num_wred_profiles += len(wred_profiles) - # get parameters of a WRED profile def get_profile_data(self, profile): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + Get parameters of a WRED profile + """ + if self.namespace or not multi_asic.is_multi_asic(): + db = ConfigDBConnector(namespace=self.namespace) + db.connect() + wred_profiles = db.get_table(WRED_PROFILE_TABLE_NAME) + else: + wred_profiles = multi_asic.get_table(WRED_PROFILE_TABLE_NAME) for profile_name, profile_data in wred_profiles.items(): if profile_name == profile: @@ -131,6 +149,9 @@ class EcnConfig(object): return None def validate_profile_data(self, profile_data): + """ + Validate threshold, probability and color values. + """ result = True # check if thresholds are non-negative integers @@ -168,73 +189,116 @@ class EcnConfig(object): return result + @multi_asic_util.run_on_multi_asic def set_wred_threshold(self, profile, threshold, value): + """ + Single asic behaviour: + Set threshold value on default namespace + + Multi asic behaviour: + Set threshold value on the specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + # Modify the threshold field = WRED_CONFIG_FIELDS[threshold] if self.verbose: - print("Setting %s value to %s" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][threshold] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table + @multi_asic_util.run_on_multi_asic def set_wred_prob(self, profile, drop_color, value): + """ + Single asic behaviour: + Set drop probability on default namespace + + Multi asic behaviour: + Set drop probability value on the specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + # Modify the drop probability field = WRED_CONFIG_FIELDS[drop_color] if self.verbose: - print("Setting %s value to %s%%" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%%%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][field] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table class EcnQ(object): """ Process ecn on/off on queues """ - def __init__(self, queues, filename, verbose): + def __init__(self, queues, test_filename, verbose, namespace): self.ports_key = [] self.queues = queues.split(',') - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.CONFIG_DB) - - self.gen_ports_key() + # For unit testing + self.test_filename = test_filename + self.updated_q_table = {} def gen_ports_key(self): - if self.ports_key is not None: - port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) - self.ports_key = list(port_table.keys()) + port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) + self.ports_key = list(port_table.keys()) - # Verify at least one port is available - if len(self.ports_key) == 0: - raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) + # Verify 
at least one port is available + if len(self.ports_key) == 0: + raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) - # In multi-ASIC platforms backend ethernet ports are identified as - # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. - self.ports_key.sort( - key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 - ) + # In multi-ASIC platforms backend ethernet ports are identified as + # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. + self.ports_key.sort( + key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 + ) def dump_table_info(self): - if self.filename is not None: + """ + A function to dump updated queue tables. + These JSON dumps are used exclusively by unit tests. + The tables are organized by namespaces for multi-asic support. + """ + if self.test_filename is not None: q_table = self.config_db.get_table(QUEUE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump({repr(x):y for x, y in q_table.items()}, fd) + with open(self.test_filename, "w") as fd: + self.updated_q_table[self.multi_asic.current_namespace] = {repr(x):y for x, y in q_table.items()} + json.dump(self.updated_q_table, fd) + @multi_asic_util.run_on_multi_asic def set(self, enable): + """ + Single asic behaviour: + Enable or disable queues on default namespace + + Multi asic behaviour: + Enable or disable queues on a specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + self.gen_ports_key() for queue in self.queues: if self.verbose: print("%s ECN on %s queue %s" % ("Enable" if enable else "Disable", ','.join(self.ports_key), queue)) @@ -252,10 +316,24 @@ class EcnQ(object): self.config_db.mod_entry(QUEUE_TABLE_NAME, key, None) else: self.config_db.set_entry(QUEUE_TABLE_NAME, key, entry) + # For unit testing self.dump_table_info() + @multi_asic_util.run_on_multi_asic def get(self): - print("ECN status:") + """ + Single asic behaviour: + Get status of queues on default namespace + + Multi asic behaviour: + Get status of queues on a specified namespace. + If no namespace is provided, get queue status on all namespaces. 
+ """ + self.gen_ports_key() + namespace = self.multi_asic.current_namespace + namespace_str = f" for namespace {namespace}" if namespace else '' + print(f"ECN status{namespace_str}:") + for queue in self.queues: out = ' '.join(['queue', queue]) if self.verbose: @@ -270,81 +348,77 @@ class EcnQ(object): print("%s: on" % (out)) else: print("%s: off" % (out)) + # For unit testing self.dump_table_info() -def main(): - parser = argparse.ArgumentParser(description='Show and change:\n' - '1) ECN WRED configuration\n' - '2) ECN on/off status on queues', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show ECN WRED configuration') - parser.add_argument('-p', '--profile', type=str, help='specify WRED profile name', default=None) - parser.add_argument('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) - parser.add_argument('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) - parser.add_argument('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) - parser.add_argument('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) - parser.add_argument('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) - parser.add_argument('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) - parser.add_argument('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False) - - parser.add_argument('command', nargs='?', choices=['on', 'off'], type=str, help='turn on/off ecn', default=None) - parser.add_argument('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - +@click.command(help='Show and change: ECN WRED configuration\nECN on/off status on queues') +@click.argument('command', type=click.Choice(['on', 'off'], case_sensitive=False), required=False, default=None) +@click.option('-l', '--list', 'show_config', is_flag=True, help='show ECN WRED configuration') +@click.option('-p', '--profile', type=str, help='specify WRED profile name', default=None) +@click.option('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) +@click.option('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) +@click.option('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) +@click.option('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) +@click.option('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) +@click.option('-rmax', '--red-max', type=str, help='set max threshold for packets marked 
\'red\'', default=None) +@click.option('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) +@click.option('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) +@click.option('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='Verbose output', default=False) +@click.option('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) +@click.version_option(version='1.0') +def main(command, show_config, profile, green_min, + green_max, yellow_min, yellow_max, red_min, + red_max, green_drop_prob, yellow_drop_prob, + red_drop_prob, namespace, verbose, queue): + test_filename = None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/ecnconfig']) - - args = parser.parse_args() + test_filename = '/tmp/ecnconfig' try: - if args.list or args.profile: - prof_cfg = EcnConfig(args.filename, args.verbose) - if args.list: - arg_len_max = 2 - if args.verbose: - arg_len_max += 1 - if args.filename: - arg_len_max += 2 - if len(sys.argv) > arg_len_max: + load_db_config() + if show_config or profile: + # Check if a set option has been provided + setOption = (green_min or green_max or yellow_min or yellow_max or red_min or red_max + or green_drop_prob or yellow_drop_prob or red_drop_prob) + + prof_cfg = EcnConfig(test_filename, verbose, namespace) + if show_config: + if setOption: raise Exception("Input arguments error. No set options allowed when -l[ist] specified") + prof_cfg.list() - elif args.profile: - arg_len_min = 4 - if args.verbose: - arg_len_min += 1 - if args.filename: - arg_len_min += 2 - if len(sys.argv) < arg_len_min: + if verbose: + print("Total profiles: %d" % prof_cfg.num_wred_profiles) + + elif profile: + if not setOption: raise Exception("Input arguments error. Specify at least one threshold parameter to set") # get current configuration data - wred_profile_data = prof_cfg.get_profile_data(args.profile) + wred_profile_data = prof_cfg.get_profile_data(profile) if wred_profile_data is None: - raise Exception("Input arguments error. Invalid WRED profile %s" % (args.profile)) - - if args.green_max: - wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = args.green_max - if args.green_min: - wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = args.green_min - if args.yellow_max: - wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = args.yellow_max - if args.yellow_min: - wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = args.yellow_min - if args.red_max: - wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = args.red_max - if args.red_min: - wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = args.red_min - if args.green_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = args.green_drop_prob - if args.yellow_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = args.yellow_drop_prob - if args.red_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = args.red_drop_prob + raise Exception("Input arguments error. 
Invalid WRED profile %s for namespace %s" % (profile, namespace)) + + if green_max: + wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = green_max + if green_min: + wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = green_min + if yellow_max: + wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = yellow_max + if yellow_min: + wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = yellow_min + if red_max: + wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = red_max + if red_min: + wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = red_min + if green_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = green_drop_prob + if yellow_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = yellow_drop_prob + if red_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = red_drop_prob # validate new configuration data if prof_cfg.validate_profile_data(wred_profile_data) == False: @@ -352,41 +426,39 @@ def main(): # apply new configuration # the following parameters can be combined in one run - if args.green_max: - prof_cfg.set_wred_threshold(args.profile, "gmax", args.green_max) - if args.green_min: - prof_cfg.set_wred_threshold(args.profile, "gmin", args.green_min) - if args.yellow_max: - prof_cfg.set_wred_threshold(args.profile, "ymax", args.yellow_max) - if args.yellow_min: - prof_cfg.set_wred_threshold(args.profile, "ymin", args.yellow_min) - if args.red_max: - prof_cfg.set_wred_threshold(args.profile, "rmax", args.red_max) - if args.red_min: - prof_cfg.set_wred_threshold(args.profile, "rmin", args.red_min) - if args.green_drop_prob: - prof_cfg.set_wred_prob(args.profile, "gdrop", args.green_drop_prob) - if args.yellow_drop_prob: - prof_cfg.set_wred_prob(args.profile, "ydrop", args.yellow_drop_prob) - if args.red_drop_prob: - prof_cfg.set_wred_prob(args.profile, "rdrop", args.red_drop_prob) - - elif args.queue: - arg_len_min = 3 - if args.filename: - arg_len_min += 1 - if args.verbose: - arg_len_min += 1 - if len(sys.argv) < arg_len_min: + if green_max: + prof_cfg.set_wred_threshold(profile, "gmax", green_max) + if green_min: + prof_cfg.set_wred_threshold(profile, "gmin", green_min) + if yellow_max: + prof_cfg.set_wred_threshold(profile, "ymax", yellow_max) + if yellow_min: + prof_cfg.set_wred_threshold(profile, "ymin", yellow_min) + if red_max: + prof_cfg.set_wred_threshold(profile, "rmax", red_max) + if red_min: + prof_cfg.set_wred_threshold(profile, "rmin", red_min) + if green_drop_prob: + prof_cfg.set_wred_prob(profile, "gdrop", green_drop_prob) + if yellow_drop_prob: + prof_cfg.set_wred_prob(profile, "ydrop", yellow_drop_prob) + if red_drop_prob: + prof_cfg.set_wred_prob(profile, "rdrop", red_drop_prob) + + # Dump the current config in the file for unit tests + if test_filename: + with open(test_filename, "w") as fd: + json.dump(prof_cfg.updated_profile_tables, fd) + + elif queue: + if queue.split(',') == ['']: raise Exception("Input arguments error. 
Specify at least one queue by index") - - q_ecn = EcnQ(args.queue, args.filename, args.verbose) - if not args.command: + q_ecn = EcnQ(queue, test_filename, verbose, namespace) + if command is None: q_ecn.get() else: - q_ecn.set(enable = True if args.command == 'on' else False) + q_ecn.set(enable = True if command == 'on' else False) else: - parser.print_help() sys.exit(1) except Exception as e: diff --git a/scripts/fabricstat b/scripts/fabricstat index cf3d14bf5e..6f1893c9db 100755 --- a/scripts/fabricstat +++ b/scripts/fabricstat @@ -399,6 +399,49 @@ class FabricIsolation(FabricStat): print(tabulate(body, header, tablefmt='simple', stralign='right')) return +class FabricRate(FabricStat): + def rate_print(self): + # Connect to database + self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace) + # Get the set of all fabric ports + port_keys = self.db.keys(self.db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*') + # Create a new dictionary. The keys are the local port values in integer format. + # Only fabric ports that have remote port data are added. + port_dict = {} + for port_key in port_keys: + port_data = self.db.get_all(self.db.STATE_DB, port_key) + port_number = int(port_key.replace("FABRIC_PORT_TABLE|PORT", "")) + port_dict.update({port_number: port_data}) + # Create ordered table of fabric ports. + rxRate = 0 + rxData = 0 + txRate = 0 + txData = 0 + time = 0 + local_time = "" + # Rx data, Tx data, and Time are used for testing + asic = "asic0" + if self.namespace: + asic = self.namespace + header = ["ASIC", "Link ID", "Rx Data Mbps", "Tx Data Mbps"] + body = [] + for port_number in sorted(port_dict.keys()): + port_data = port_dict[port_number] + if "OLD_RX_RATE_AVG" in port_data: + rxRate = port_data["OLD_RX_RATE_AVG"] + if "OLD_RX_DATA" in port_data: + rxData = port_data["OLD_RX_DATA"] + if "OLD_TX_RATE_AVG" in port_data: + txRate = port_data["OLD_TX_RATE_AVG"] + if "OLD_TX_DATA" in port_data: + txData = port_data["OLD_TX_DATA"] + if "LAST_TIME" in port_data: + time = int(port_data["LAST_TIME"]) + local_time = datetime.fromtimestamp(time) + body.append((asic, port_number, rxRate, txRate)) + click.echo() + click.echo(tabulate(body, header, tablefmt='simple', stralign='right')) + def main(): global cnstat_dir global cnstat_fqn_file_port @@ -415,6 +458,8 @@ Examples: fabricstat -q -n asic0 fabricstat -c fabricstat -c -n asic0 + fabricstat -s + fabricstat -s -n asic0 fabricstat -C fabricstat -D """) @@ -425,6 +470,7 @@ Examples: parser.add_argument('-e', '--errors', action='store_true', help='Display errors') parser.add_argument('-c','--capacity',action='store_true', help='Display fabric capacity') parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status') + parser.add_argument('-s','--rate', action='store_true', help='Display fabric counters rate') parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters') parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats') @@ -433,6 +479,7 @@ Examples: reachability = args.reachability capacity_status = args.capacity isolation_status = args.isolation + rate = args.rate namespace = args.namespace errors_only = args.errors @@ -455,17 +502,21 @@ Examples: def nsStat(ns, errors_only): if queue: - stat = FabricQueueStat(ns) + stat = FabricQueueStat(ns) elif reachability: - stat = FabricReachability(ns) - stat.reachability_print() - return + stat = FabricReachability(ns) + stat.reachability_print() + return elif isolation_status: - stat =
FabricIsolation(ns) - stat.isolation_print() - return + stat = FabricIsolation(ns) + stat.isolation_print() + return + elif rate: + stat = FabricRate(ns) + stat.rate_print() + return else: - stat = FabricPortStat(ns) + stat = FabricPortStat(ns) cnstat_dict = stat.get_cnstat_dict() if save_fresh_stats: stat.save_fresh_stats() @@ -489,7 +540,10 @@ Examples: stat = FabricCapacity(namespace, table_cnt, threshold) stat.capacity_print() - click.echo("Monitored fabric capacity threshold: {}".format(threshold[0])) + print_th = "" + if threshold: + print_th = threshold[0] + click.echo("Monitored fabric capacity threshold: {}".format(print_th)) click.echo() click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right')) else: diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 91791b3771..e183c34219 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -50,6 +50,7 @@ EXIT_NO_CONTROL_PLANE_ASSISTANT=20 EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21 EXIT_PLATFORM_FW_AU_FAILURE=22 EXIT_TEAMD_RETRY_COUNT_FAILURE=23 +EXIT_NO_MIRROR_SESSION_ACLS=24 function error() { @@ -146,7 +147,7 @@ function clear_boot() # common_clear debug "${REBOOT_TYPE} failure ($?) cleanup ..." - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true teardown_control_plane_assistant @@ -243,18 +244,42 @@ function wait_for_pre_shutdown_complete_or_fail() function backup_database() { debug "Backing up database ..." + + if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Advanced reboot: dump state to host disk + sonic-db-cli ASIC_DB FLUSHDB > /dev/null + sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null + sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null + fi + + if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Flush RESTAPI_DB in fast-reboot to avoid stale status + sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null + fi + # Dump redis content to a file 'dump.rdb' in warmboot directory mkdir -p $WARM_DIR # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|*, FG_ROUTE_TABLE|* sonic-db-cli STATE_DB eval " for _, k in ipairs(redis.call('keys', '*')) do - if not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ + if string.match(k, 'PORT_TABLE|Ethernet') then + for i, f in ipairs(redis.call('hgetall', k)) do + if i % 2 == 1 then + if not string.match(f, 'host_tx_ready') \ + and not string.match(f, 'NPU_SI_SETTINGS_SYNC_STATUS') \ + and not string.match(f, 'CMIS_REINIT_REQUIRED') then + redis.call('hdel', k, f) + end + end + end + elseif not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ and not string.match(k, 'MIRROR_SESSION_TABLE|') \ and not string.match(k, 'FG_ROUTE_TABLE|') \ and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \ and not string.match(k, 'TRANSCEIVER_INFO|') \ and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \ and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \ + and not string.match(k, 'STORAGE_INFO|') \ and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then redis.call('del', k) end @@ -272,6 +297,47 @@ function backup_database() fi } +function check_mirror_session_acls() +{ + debug "Checking if mirror session ACLs (arp, nd) are programmed to ASIC successfully" + ACL_ARP="missing" + ACL_ND="missing" + start_time=${SECONDS} + elapsed_time=$((${SECONDS} - ${start_time})) + while [[ ${elapsed_time} -lt 10 ]]; do + CHECK_ACL_ENTRIES=0 + ACL_OUTPUT=$(sonic-db-cli ASIC_DB KEYS "*" | grep SAI_OBJECT_TYPE_ACL_ENTRY) || CHECK_ACL_ENTRIES=$?
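+ # grep returns non-zero when no SAI_OBJECT_TYPE_ACL_ENTRY keys exist yet; the captured status is treated as a retrieval failure below and reported through EXIT_NO_MIRROR_SESSION_ACLS.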
+ if [[ ${CHECK_ACL_ENTRIES} -ne 0 ]]; then + error "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis" + exit ${EXIT_NO_MIRROR_SESSION_ACLS} + fi + ACL_ENTRIES=( ${ACL_OUTPUT} ) + if [[ ${#ACL_ENTRIES[@]} -eq 0 ]]; then + error "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found" + exit ${EXIT_NO_MIRROR_SESSION_ACLS} + fi + for ACL_ENTRY in ${ACL_ENTRIES[@]}; do + ACL_PRIORITY=$(sonic-db-cli ASIC_DB HGET ${ACL_ENTRY} SAI_ACL_ENTRY_ATTR_PRIORITY) + if [[ ${ACL_PRIORITY} -eq 8888 ]]; then + ACL_ARP="found" + fi + if [[ ${ACL_PRIORITY} -eq 8887 ]]; then + ACL_ND="found" + fi + done + if [[ "${ACL_ARP}" = "found" && "${ACL_ND}" = "found" ]]; then + break + fi + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + done + if [[ "${ACL_ARP}" != "found" || "${ACL_ND}" != "found" ]]; then + debug "Failed to program mirror session ACLs on ASIC. ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" + exit ${EXIT_NO_MIRROR_SESSION_ACLS} + fi + debug "Mirror session ACLs (arp, nd) programmed to ASIC successfully" +} + function setup_control_plane_assistant() { if [[ -n "${ASSISTANT_IP_LIST}" && -x ${ASSISTANT_SCRIPT} ]]; then @@ -279,6 +345,7 @@ function setup_control_plane_assistant() if [[ "${HWSKU}" != "DellEMC-Z9332f-M-O16C64" && "${HWSKU}" != "DellEMC-Z9332f-M-O16C64-lab" ]]; then debug "Setting up control plane assistant: ${ASSISTANT_IP_LIST} ..." ${ASSISTANT_SCRIPT} -s ${ASSISTANT_IP_LIST} -m set + check_mirror_session_acls else debug "${HWSKU} Not capable to support CPA. Skipping gracefully ..." fi @@ -452,7 +519,7 @@ function unload_kernel() { # Unload the previously loaded kernel if any loaded if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then - /sbin/kexec -u + /sbin/kexec -u -a fi } @@ -752,23 +819,11 @@ for service in ${SERVICES_TO_STOP}; do wait_for_pre_shutdown_complete_or_fail fi - if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Advanced reboot: dump state to host disk - sonic-db-cli ASIC_DB FLUSHDB > /dev/null - sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null - sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null - fi - - if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Flush RESTAP_DB in fast-reboot to avoid stale status - sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null - fi - - backup_database - fi done +backup_database + # Stop the docker container engine. Otherwise we will have a broken docker storage systemctl stop docker.service || debug "Ignore stopping docker service error $?" @@ -810,7 +865,6 @@ if [[ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_FWUTIL_AU_REBOOT_HANDLE} ]]; then fi fi - # Enable Watchdog Timer if [ -x ${WATCHDOG_UTIL} ]; then debug "Enabling Watchdog before ${REBOOT_TYPE}" diff --git a/scripts/generate_dump b/scripts/generate_dump index 06d163a45e..3d0ef3430d 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1120,7 +1120,6 @@ save_file() { find_files() { trap 'handle_error $? 
$LINENO' ERR local -r directory=$1 - $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" local -r find_command="find -L $directory -type f -newer ${REFERENCE_FILE}" echo $($find_command) @@ -1914,6 +1913,8 @@ main() { ${CMD_PREFIX}renice +5 -p $$ >> /dev/null ${CMD_PREFIX}ionice -c 2 -n 5 -p $$ >> /dev/null + # Create a reference file to compare modification times against + $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" $MKDIR $V -p $TARDIR # Start with this script so its obvious what code is responsible @@ -2155,7 +2156,7 @@ finalize() { ############################################################################### -# Remove secret from pipeline inout and output result to pipeline. +# Remove secret from pipeline input and output result to pipeline. # Globals: # None # Arguments: @@ -2168,6 +2169,18 @@ remove_secret_from_config_db_dump() { sed -E 's/\"passkey\"\s*:\s*\"([^\"]*)\"/\"passkey\":\"****\"/g; /SNMP_COMMUNITY/,/\s{2,4}\},/d' } + +############################################################################### +# Remove secret from file. +############################################################################### +remove_secret_from_config_db_dump_file() { + local dumpfile=$1 + if [ -e ${dumpfile} ]; then + cat $dumpfile | remove_secret_from_config_db_dump > $dumpfile.temp + mv $dumpfile.temp $dumpfile + fi +} + ############################################################################### # Remove secret from dump files. # Globals: @@ -2201,8 +2214,24 @@ remove_secret_from_etc_files() { sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml # Remove secret from /etc/sonic/config_db.json - cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp - mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/config_db.json + + # Remove secret from /etc/sonic/golden_config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/golden_config_db.json + + # Remove secret from /etc/sonic/old_config/ + + # Remove snmp community string from old_config/snmp.yml + local oldsnmp=${dumppath}/etc/sonic/old_config/snmp.yml + if [ -e ${oldsnmp} ]; then + sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $oldsnmp + fi + + # Remove secret from /etc/sonic/old_config/config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/config_db.json + + # Remove secret from /etc/sonic/old_config/golden_config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/golden_config_db.json } ############################################################################### diff --git a/scripts/lldpshow b/scripts/lldpshow index e09176cf3c..fe40296f91 100755 --- a/scripts/lldpshow +++ b/scripts/lldpshow @@ -26,8 +26,9 @@ import sys from lxml import etree as ET from sonic_py_common import device_info +from utilities_common import constants from swsscommon.swsscommon import ConfigDBConnector -from utilities_common.general import load_db_config +from utilities_common.general import load_db_config, get_feature_state_data from tabulate import tabulate BACKEND_ASIC_INTERFACE_NAME_PREFIX = 'Ethernet-BP' @@ -69,8 +70,12 @@ class Lldpshow(object): self.lldp_interface[instance_num] += key + SPACE_TOKEN # LLDP running in host namespace - self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE) - self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE) + config_db =
ConfigDBConnector(use_unix_socket_path=True, namespace=constants.DEFAULT_NAMESPACE) + config_db.connect() + global_scope, asic_scope = get_feature_state_data(config_db, "lldp") + if global_scope == "True": + self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE) + self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE) def get_info(self, lldp_detail_info, lldp_port): """ @@ -85,7 +90,7 @@ class Lldpshow(object): elif lldp_interface_list == '': lldp_args = [] else: - lldp_args = [lldp_interface_list] + lldp_args = lldp_interface_list.split(' ') lldp_cmd = ['sudo', 'docker', 'exec', '-i', 'lldp{}'.format(self.lldp_instance[lldp_instace_num]), 'lldpctl'] + lldp_args p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, text=True) (output, err) = p.communicate() diff --git a/scripts/pg-drop b/scripts/pg-drop index 7741593081..9078d28ad6 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,6 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. # ##################################################################### +from importlib import reload import json import argparse import os @@ -13,6 +14,8 @@ from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic # mock the redis for unit test purposes # try: @@ -22,7 +25,9 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass @@ -43,13 +48,11 @@ def get_dropstat_dir(): class PgDropStat(object): - def __init__(self): - self.counters_db = SonicV2Connector(host='127.0.0.1') - self.counters_db.connect(self.counters_db.COUNTERS_DB) - - self.configdb = ConfigDBConnector() + def __init__(self, namespace): + self.namespace = namespace + self.ns_list = multi_asic.get_namespace_list(namespace) + self.configdb = ConfigDBConnector(namespace=namespace) self.configdb.connect() - dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'pg_drop_stats') @@ -57,14 +60,14 @@ class PgDropStat(object): """ Get port ID using object ID """ - port_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, oid) + port_id = self.get_counters_mapdata(COUNTERS_PG_PORT_MAP, oid) if not port_id: print("Port is not available for oid '{}'".format(oid)) sys.exit(1) return port_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.get_counters_mapall(COUNTERS_PORT_NAME_MAP) if not self.counter_port_name_map: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -77,7 +80,7 @@ class PgDropStat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.get_counters_mapall(COUNTERS_PG_NAME_MAP) if not counter_pg_name_map: print("COUNTERS_PG_NAME_MAP is empty!") sys.exit(1) @@ -94,13 +97,32 @@ class PgDropStat(object): "header_prefix": "PG"}, } + def get_counters_mapdata(self, tablemap, index): + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + data = counters_db.get(counters_db.COUNTERS_DB, tablemap, 
index) + if data: + return data + return None + + def get_counters_mapall(self, tablemap): + mapdata = {} + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + map_result = counters_db.get_all(counters_db.COUNTERS_DB, tablemap) + if map_result: + mapdata.update(map_result) + return mapdata + def get_pg_index(self, oid): """ return PG index (0-7) oid - object ID for entry in redis """ - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, oid) + pg_index = self.get_counters_mapdata(COUNTERS_PG_INDEX_MAP, oid) if not pg_index: print("Priority group index is not available for oid '{}'".format(oid)) sys.exit(1) @@ -154,7 +176,7 @@ class PgDropStat(object): old_collected_data = port_drop_ckpt.get(name,{})[full_table_id] if len(port_drop_ckpt) > 0 else 0 idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, counter_name) + counter_data = self.get_counters_mapdata(full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -180,18 +202,18 @@ class PgDropStat(object): print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def get_counts(self, counters, oid): - """ - Get the PG drop counts for an individual counter. - """ - counts = {} - table_id = COUNTER_TABLE_PREFIX + oid - for counter in counters: - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, table_id, counter) - if counter_data is None: - counts[table_id] = 0 - else: - counts[table_id] = int(counter_data) - return counts + """ + Get the PG drop counts for an individual counter. + """ + counts = {} + table_id = COUNTER_TABLE_PREFIX + oid + for counter in counters: + counter_data = self.get_counters_mapdata(table_id, counter) + if counter_data is None: + counts[table_id] = 0 + else: + counts[table_id] = int(counter_data) + return counts def get_counts_table(self, counters, object_table): """ @@ -199,10 +221,10 @@ class PgDropStat(object): to its PG drop counts. Counts are contained in a dictionary that maps counter oid to its counts. """ - counter_object_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, object_table) + counter_object_name_map = self.get_counters_mapall(object_table) current_stat_dict = OrderedDict() - if counter_object_name_map is None: + if not counter_object_name_map: return current_stat_dict for obj in natsorted(counter_object_name_map): @@ -239,10 +261,12 @@ def main(): epilog=""" Examples: pg-drop -c show +pg-drop -c show --namespace asic0 pg-drop -c clear """) parser.add_argument('-c', '--command', type=str, help='Desired action to perform') + parser.add_argument('-n', '--namespace', type=str, help='Namespace name or skip for all', default=None) args = parser.parse_args() command = args.command @@ -256,7 +280,16 @@ pg-drop -c clear print(e) sys.exit(e.errno) - pgdropstat = PgDropStat() + # Load database config files + load_db_config() + namespaces = multi_asic.get_namespace_list() + if args.namespace and args.namespace not in namespaces: + namespacelist = ', '.join(namespaces) + print(f"Invalid value for '--namespace' / '-n'.
Choose from one of ({namespacelist})") + sys.exit(1) + + # For 'clear' command force applying to all namespaces + pgdropstat = PgDropStat(args.namespace if command != 'clear' else None) if command == 'clear': pgdropstat.clear_drop_counts() diff --git a/scripts/queuestat b/scripts/queuestat index 1f4c909096..16112d7af3 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/python3 ##################################################################### # @@ -41,8 +41,9 @@ from utilities_common import constants import utilities_common.multi_asic as multi_asic_util QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") +VoqStats = namedtuple("VoqStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes, creditWDpkts") header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] -voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] +voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes', 'Credit-WD-Del/pkts'] counter_bucket_dict = { 'SAI_QUEUE_STAT_PACKETS': 2, @@ -50,6 +51,9 @@ counter_bucket_dict = { 'SAI_QUEUE_STAT_DROPPED_PACKETS': 4, 'SAI_QUEUE_STAT_DROPPED_BYTES': 5, } +voq_counter_bucket_dict = { + 'SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS': 6 +} from utilities_common.cli import json_dump from utilities_common.netstat import ns_diff, STATUS_NA @@ -99,15 +103,24 @@ class voqCounterUtil(): metaData.setdefault(sysPort,{}).setdefault(fapId,set()).add(voqIdx) return metaData, ports -def build_json(port, cnstat): +def build_json(port, cnstat, voq=False): def ports_stats(k): p = {} - p[k[1]] = { - "totalpacket": k[2], - "totalbytes": k[3], - "droppacket": k[4], - "dropbytes": k[5] - } + if voq: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5], + "creditWDPkts": k[6] + } + else: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5] + } return p out = {} @@ -241,18 +254,30 @@ class Queuestat(object): print("Queue Type is invalid:", table_id, queue_type) sys.exit(1) - fields = ["0","0","0","0","0","0"] + if self.voq: + fields = ["0","0","0","0","0","0","0"] + else: + fields = ["0","0","0","0","0","0"] fields[0] = get_queue_index(table_id) fields[1] = get_queue_type(table_id) - for counter_name, pos in counter_bucket_dict.items(): + counter_dict = {} + counter_dict.update(counter_bucket_dict) + if self.voq: + counter_dict.update(voq_counter_bucket_dict) + + for counter_name, pos in counter_dict.items(): full_table_id = COUNTER_TABLE_PREFIX + table_id counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields)._asdict() + + if self.voq: + cntr = VoqStats._make(fields)._asdict() + else: + cntr = QueueStats._make(fields)._asdict() return cntr # Build a dictionary of the stats @@ -277,14 +302,21 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ - data['droppacket'] != '0' or data['dropbytes'] != '0': - table.append((port, data['queuetype'] + str(data['queueindex']), - data['totalpacket'], data['totalbytes'], - data['droppacket'], data['dropbytes'])) + if self.voq: + if not non_zero or data['totalpacket'] != '0' or 
data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or data['dropbytes'] != '0' or data['creditWDpkts'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'], data['creditWDpkts'])) + else: + if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or data['dropbytes'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + json_output[port].update(build_json(port, table, self.voq)) return json_output else: hdr = voq_header if self.voq else header @@ -308,25 +340,42 @@ class Queuestat(object): old_cntr = None if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) - if old_cntr is not None: - if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + if self.voq: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ + ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0' or \ + ns_diff(cntr['creditWDpkts'], old_cntr['creditWDpkts']) != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']), + ns_diff(cntr['creditWDpkts'], old_cntr['creditWDpkts']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + cntr['droppacket'] != '0' or cntr['dropbytes'] != '0' or cntr['creditWDpkts'] != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'], cntr['creditWDpkts'])) + else: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), - ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), - ns_diff(cntr['droppacket'], old_cntr['droppacket']), - ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) - elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ cntr['droppacket'] != '0' or cntr['dropbytes'] != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - cntr['totalpacket'], cntr['totalbytes'], - cntr['droppacket'], cntr['dropbytes'])) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + 
json_output[port].update(build_json(port, table, self.voq)) return json_output else: hdr = voq_header if self.voq else header diff --git a/scripts/reboot b/scripts/reboot index 83e06e78e6..b6f8ff96fb 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -65,8 +65,8 @@ function stop_pmon_service() { CONTAINER_STOP_RC=0 debug "Stopping pmon docker" - docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? systemctl stop pmon || debug "Ignore stopping pmon error $?" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." fi diff --git a/scripts/route_check.py b/scripts/route_check.py index ee417dc49c..2fbe041547 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -328,6 +328,16 @@ def get_asicdb_routes(namespace): return (selector, subs, sorted(rt)) +def is_bgp_suppress_fib_pending_enabled(namespace): + """ + Returns True if FIB suppression is enabled in BGP config, False otherwise + """ + show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace] + + output = subprocess.check_output(show_run_cmd, text=True) + return 'bgp suppress-fib-pending' in output + + def is_suppress_fib_pending_enabled(namespace): """ Returns True if FIB suppression is enabled, False otherwise @@ -781,18 +791,20 @@ def check_routes(namespace): results[namespace] = {} results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - rt_frr_miss = check_frr_pending_routes(namespace) + if is_bgp_suppress_fib_pending_enabled(namespace): + rt_frr_miss = check_frr_pending_routes(namespace) - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss - if results: - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all " + "routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 957c6009eb..0b9030a6f7 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -64,8 +64,8 @@ function stop_pmon_service() { CONTAINER_STOP_RC=0 debug "Stopping pmon docker" - docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? systemctl stop pmon || debug "Ignore stopping pmon error $?" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container pmon RC $CONTAINER_STOP_RC ."
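+ # Non-fatal: the failed container kill is logged and the reboot sequence continues.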
fi diff --git a/setup.py b/setup.py index a989acb876..6a66f012f9 100644 --- a/setup.py +++ b/setup.py @@ -250,7 +250,7 @@ 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', 'pyroute2>=0.5.14, <0.6.1', - 'requests>=2.25.0', + 'requests>=2.25.0, <=2.31.0', 'tabulate==0.9.0', 'toposort==1.6', 'www-authenticate==0.9.2', diff --git a/sfputil/main.py b/sfputil/main.py index ad0b1b3775..2c8f85d016 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -673,6 +673,20 @@ def eeprom(port, dump_dom, namespace): output += convert_sfp_info_to_output_string(xcvr_info) if dump_dom: + try: + api = platform_chassis.get_sfp(physical_port).get_xcvr_api() + except NotImplementedError: + output += "API is currently not implemented for this platform\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + if api is None: + output += "API is None while getting DOM info!\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + else: + if api.is_flat_memory(): + output += "DOM values not supported for flat memory module\n" + continue try: xcvr_dom_info = platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status() except NotImplementedError: @@ -1306,6 +1320,62 @@ def reset(port_name): i += 1 + +# 'power' subgroup +@cli.group() +def power(): + """Enable or disable power of SFP transceiver""" + pass + + +# Helper method for enabling/disabling transceiver power +def set_power(port_name, enable): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("Power disable/enable is not available for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + presence = sfp.get_presence() + except NotImplementedError: + click.echo("sfp get_presence() NOT implemented!") + sys.exit(EXIT_FAIL) + + if not presence: + click.echo("{}: SFP EEPROM not detected\n".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + result = platform_chassis.get_sfp(physical_port).set_power(enable) + except (NotImplementedError, AttributeError): + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) + + if result: + click.echo("OK") + else: + click.echo("Failed") + sys.exit(EXIT_FAIL) + + +# 'disable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def disable(port_name): + """Disable power of SFP transceiver""" + set_power(port_name, False) + + +# 'enable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def enable(port_name): + """Enable power of SFP transceiver""" + set_power(port_name, True) + + def update_firmware_info_to_state_db(port_name): physical_port = logical_port_to_physical_port_index(port_name) @@ -1316,10 +1386,8 @@ def update_firmware_info_to_state_db(port_name): state_db.connect(state_db.STATE_DB) transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() if transceiver_firmware_info_dict is not None: - active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A') - inactive_firmware = transceiver_firmware_info_dict.get('inactive_firmware', 'N/A') - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware) - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + for key, value in transceiver_firmware_info_dict.items(): + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), key, value) #
'firmware' subgroup @cli.group() @@ -1890,5 +1958,50 @@ def get_overall_offset_sff8472(api, page, offset, size, wire_addr): return page * PAGE_SIZE + offset + PAGE_SIZE_FOR_A0H +# 'debug' subgroup +@cli.group() +def debug(): + """Module debug and diagnostic control""" + pass + + +# 'loopback' subcommand +@debug.command() +@click.argument('port_name', required=True, default=None) +@click.argument('loopback_mode', required=True, default="none", + type=click.Choice(["none", "host-side-input", "host-side-output", + "media-side-input", "media-side-output"])) +def loopback(port_name, loopback_mode): + """Set module diagnostic loopback mode + """ + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("{}: This functionality is not applicable for RJ45 port".format(port_name)) + sys.exit(EXIT_FAIL) + + if not is_sfp_present(port_name): + click.echo("{}: SFP EEPROM not detected".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + api = sfp.get_xcvr_api() + except NotImplementedError: + click.echo("{}: This functionality is not implemented".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + try: + status = api.set_loopback_mode(loopback_mode) + except AttributeError: + click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + if status: + click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + else: + click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + sys.exit(EXIT_FAIL) + if __name__ == '__main__': cli() diff --git a/show/bgp_cli.py b/show/bgp_cli.py new file mode 100644 index 0000000000..d475638092 --- /dev/null +++ b/show/bgp_cli.py @@ -0,0 +1,128 @@ +import click +import tabulate +import json +import utilities_common.cli as clicommon + +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + to_str, +) + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: formatted attribute value. 
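+ + Example: format_attr_value({"tsa_enabled": "true"}, {"name": "tsa_enabled", "is-leaf-list": False}) returns "true", while a missing attribute yields "N/A".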
+ """ + + if attr["is-leaf-list"]: + value = entry.get(attr["name"], []) + return "\n".join(value) if value else "N/A" + return entry.get(attr["name"], "N/A") + + +# +# BGP CLI ------------------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="bgp", + cls=clicommon.AliasedGroup +) +def BGP(): + """ Show BGP configuration """ + + pass + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@BGP.command( + name="device-global" +) +@click.option( + "-j", "--json", "json_format", + help="Display in JSON format", + is_flag=True, + default=False +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL(ctx, db, json_format): + """ Show BGP device global state """ + + header = [ + "TSA", + "W-ECMP", + ] + body = [] + + table = db.cfgdb.get_table(CFG_BGP_DEVICE_GLOBAL) + entry = table.get(BGP_DEVICE_GLOBAL_KEY, {}) + + if not entry: + click.echo("No configuration is present in CONFIG DB") + ctx.exit(0) + + if json_format: + json_dict = { + "tsa": to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + "w-ecmp": to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + } + click.echo(json.dumps(json_dict, indent=4)) + ctx.exit(0) + + row = [ + to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + ] + body.append(row) + + click.echo(tabulate.tabulate(body, header)) diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 6343e8b7b2..ddcd688581 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,6 +1,8 @@ import click +import sys +import subprocess -from sonic_py_common import multi_asic +from sonic_py_common import multi_asic, device_info from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon @@ -17,7 +19,14 @@ @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" - pass + if device_info.is_supervisor(): + subcommand = sys.argv[3] + if subcommand not in "network": + # the command will be executed directly by rexec if it is not "show ip bgp network" + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) # 'summary' subcommand ("show ip bgp summary") @@ -84,7 +93,7 @@ def neighbors(ipaddress, info_type, namespace): @bgp.command() @click.argument('ipaddress', metavar='[|]', - required=False) + required=True if device_info.is_supervisor() else False) @click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', type=click.Choice( @@ -95,17 +104,26 @@ def neighbors(ipaddress, info_type, namespace): 'namespace', type=str, show_default=True, - required=True if multi_asic.is_multi_asic is True else False, + required=False, help='Namespace name or all', - default=multi_asic.DEFAULT_NAMESPACE, + default="all", callback=multi_asic_util.multi_asic_namespace_validation_callback) def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): - ctx = click.get_current_context() - 
ctx.fail('-n/--namespace option required. provide namespace from list {}'\ - .format(multi_asic.get_namespace_list())) + if device_info.is_supervisor(): + # the command will be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) + + namespace = namespace.strip() + if multi_asic.is_multi_asic(): + if namespace != "all" and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('invalid namespace {}. provide namespace from list {}' + .format(namespace, multi_asic.get_namespace_list())) command = 'show ip bgp' if ipaddress is not None: @@ -125,5 +143,15 @@ def network(ipaddress, info_type, namespace): if info_type is not None: command += ' {}'.format(info_type) - output = bgp_util.run_bgp_show_command(command, namespace) - click.echo(output.rstrip('\n')) + if namespace == "all": + if multi_asic.is_multi_asic(): + for ns in multi_asic.get_namespace_list(): + click.echo("\n======== namespace {} ========".format(ns)) + output = bgp_util.run_bgp_show_command(command, ns) + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, "") + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, namespace) + click.echo(output.rstrip('\n')) diff --git a/show/dropcounters.py b/show/dropcounters.py index 30779b9364..9bb988fc5b 100644 --- a/show/dropcounters.py +++ b/show/dropcounters.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util # @@ -41,7 +42,8 @@ def capabilities(verbose): @click.option('-g', '--group', required=False) @click.option('-t', '--counter_type', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def counts(group, counter_type, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def counts(group, counter_type, verbose, namespace): """Show drop counts""" cmd = ['dropstat', '-c', 'show'] @@ -51,4 +53,7 @@ def counts(group, counter_type, verbose): if counter_type: cmd += ['-t', str(counter_type)] + if namespace: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/fabric.py b/show/fabric.py index 785e1ab477..898c76114c 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -74,3 +74,13 @@ def queue(namespace): if namespace is not None: cmd += ['-n', str(namespace)] clicommon.run_command(cmd) + + +@counters.command() +@multi_asic_util.multi_asic_click_option_namespace +def rate(namespace): + """Show fabric counters rate""" + cmd = ['fabricstat', '-s'] + if namespace is not None: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd) diff --git a/show/main.py b/show/main.py index cfdf30d3c6..c9e5e2086c 100755 --- a/show/main.py +++ b/show/main.py @@ -66,6 +66,7 @@ from . import plugins from . import syslog from . import dns +from . 
import bgp_cli # Global Variables PLATFORM_JSON = 'platform.json' @@ -164,7 +165,7 @@ def get_config_json_by_namespace(namespace): iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) # -# Display all storm-control data +# Display all storm-control data # def display_storm_all(): """ Show storm-control """ @@ -325,6 +326,8 @@ def cli(ctx): if is_gearbox_configured(): cli.add_command(gearbox.gearbox) +# bgp module +cli.add_command(bgp_cli.BGP) # # 'vrf' command ("show vrf") @@ -462,7 +465,7 @@ def is_mgmt_vrf_enabled(ctx): return False # -# 'storm-control' group +# 'storm-control' group # "show storm-control [interface ]" # @cli.group('storm-control', invoke_without_command=True) @@ -854,9 +857,12 @@ def drop(): pass @drop.command('counters') -def pg_drop_counters(): +@multi_asic_util.multi_asic_click_option_namespace +def pg_drop_counters(namespace): """Show dropped packets for priority-group""" command = ['pg-drop', '-c', 'show'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group(name='persistent-watermark') @@ -1184,7 +1190,11 @@ def protocol(verbose): ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - +elif device_info.is_supervisor(): + from .bgp_frr_v4 import bgp + ip.add_command(bgp) + from .bgp_frr_v6 import bgp + ipv6.add_command(bgp) # # 'link-local-mode' subcommand ("show ipv6 link-local-mode") # @@ -1998,10 +2008,13 @@ def policer(policer_name, verbose): # 'ecn' command ("show ecn") # @cli.command('ecn') +@multi_asic_util.multi_asic_click_option_namespace @click.option('--verbose', is_flag=True, help="Enable verbose output") -def ecn(verbose): +def ecn(namespace, verbose): """Show ECN configuration""" cmd = ['ecnconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -2108,7 +2121,7 @@ def summary(db): key_values = key.split('|') values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values["state"], values["type"], values["local_addr"], values["tx_interval"], values["rx_interval"], values["multiplier"], values["multihop"], values["local_discriminator"]]) @@ -2139,24 +2152,13 @@ def peer(db, peer_ip): key_values = key.split(delimiter) values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values.get("state"), values.get("type"), values.get("local_addr"), values.get("tx_interval"), values.get("rx_interval"), values.get("multiplier"), values.get("multihop"), values.get("local_discriminator")]) click.echo(tabulate(bfd_body, bfd_headers)) -# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") -@cli.command('suppress-fib-pending') -@clicommon.pass_db -def suppress_pending_fib(db): - """ Show the status of suppress pending FIB feature """ - - field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') - state = field_values.get('suppress-fib-pending', 'disabled').title() - click.echo(state) - - # asic-sdk-health-event subcommand ("show asic-sdk-health-event") @cli.group(cls=clicommon.AliasedGroup) def asic_sdk_health_event(): diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index 407c596163..f47b43fbdc 100644 --- a/show/plugins/pbh.py +++ 
b/show/plugins/pbh.py @@ -395,7 +395,7 @@ def get_counter_value(pbh_counters, saved_pbh_counters, key, type): if not pbh_counters[key]: return '0' - if key in saved_pbh_counters: + if key in saved_pbh_counters and saved_pbh_counters[key]: new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type]) if new_value >= 0: return str(new_value) diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 15d3aedd76..57f8ac4624 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -2,6 +2,7 @@ import contextlib import os +import glob import sys import shutil import stat @@ -33,6 +34,7 @@ TIMER_UNIT_TEMPLATE = 'timer.unit.j2' SYSTEMD_LOCATION = '/usr/lib/systemd/system' +ETC_SYSTEMD_LOCATION = '/etc/systemd/system' GENERATED_SERVICES_CONF_FILE = '/etc/sonic/generated_services.conf' @@ -92,18 +94,30 @@ def set_executable_bit(filepath): os.chmod(filepath, st.st_mode | stat.S_IEXEC) -def remove_if_exists(path): +def remove_file(path): """ Remove filepath if it exists """ - if not os.path.exists(path): - return + try: + os.remove(path) + log.info(f'removed {path}') + except FileNotFoundError: + pass + + +def remove_dir(path): + """ Remove directory if it exists """ + + try: + shutil.rmtree(path) + log.info(f'removed {path}') + except FileNotFoundError: + pass - os.remove(path) - log.info(f'removed {path}') def is_list_of_strings(command): return isinstance(command, list) and all(isinstance(item, str) for item in command) + def run_command(command: List[str]): """ Run arbitrary bash command. Args: @@ -197,12 +211,22 @@ def remove(self, """ name = package.manifest['service']['name'] - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) - remove_if_exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) - remove_if_exists(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + remove_file(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + + # remove symlinks and configuration directories created by featured + remove_file(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service')) + for unit_file in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service')): + remove_file(unit_file) + + remove_dir(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service.d')) + for unit_dir in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service.d')): + remove_dir(unit_dir) + self.update_dependent_list_file(package, remove=True) self.update_generated_services_conf_file(package, remove=True) diff --git a/ssdutil/main.py b/ssdutil/main.py index 62f43037e7..7b6f2c1ca1 100755 --- a/ssdutil/main.py +++ b/ssdutil/main.py @@ -39,7 +39,7 @@ def import_ssd_api(diskdev): except ImportError as e: log.log_warning("Platform specific SsdUtil module not found.
Falling down to the generic implementation") try: - from sonic_platform_base.sonic_ssd.ssd_generic import SsdUtil + from sonic_platform_base.sonic_storage.ssd import SsdUtil except ImportError as e: log.log_error("Failed to import default SsdUtil. Error: {}".format(str(e)), True) raise e diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index da93e8e8e8..f9edd66fa2 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -227,6 +227,9 @@ multi_asic_bgp_network_err = \ """Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" +multi_asic_bgp_network_asic_unknown_err = \ + """Error: invalid namespace asic_unknown. provide namespace from list ['asic0', 'asic1']""" + bgp_v4_network_asic0 = \ """ BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 @@ -276,7 +279,7 @@ *=i10.0.0.42/31 10.1.0.2 0 100 0 ? *>i 10.1.0.0 0 100 0 ? *=i10.0.0.44/31 10.1.0.2 0 100 0 ? -*>i 10.1.0.0 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? """ bgp_v4_network_ip_address_asic0 = \ @@ -311,6 +314,111 @@ Last update: Thu Apr 22 02:13:30 2021 """ +bgp_v4_network_all_asic = \ + """ +======== namespace asic0 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? + +======== namespace asic1 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? 
+*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +""" + bgp_v6_network_asic0 = \ """ BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 @@ -429,6 +537,9 @@ def mock_show_bgp_network_multi_asic(param): return bgp_v6_network_ip_address_asic0 elif param == 'bgp_v6_network_bestpath_asic0': return bgp_v6_network_ip_address_asic0_bestpath + elif param == "bgp_v4_network_all_asic": + # this is mocking the output of a single LC + return bgp_v4_network_asic0 else: return '' @@ -454,6 +565,11 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 1, 'rc_output': bgp_v4_network_longer_prefixes_error }, + 'bgp_v4_network_all_asic_on_single_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network + }, 'bgp_v6_network': { 'args': [], 'rc': 0, @@ -479,10 +595,10 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v6_network_longer_prefixes }, - 'bgp_v4_network_multi_asic': { + 'bgp_v4_network_default_multi_asic': { 'args': [], - 'rc': 2, - 'rc_err_msg': multi_asic_bgp_network_err + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic }, 'bgp_v4_network_asic0': { 'args': ['-nasic0'], @@ -499,6 +615,16 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v4_network_bestpath_asic0 }, + 'bgp_v4_network_all_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic + }, + 'bgp_v4_network_asic_unknown': { + 'args': ['-nasic_unknown'], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_asic_unknown_err + }, 'bgp_v6_network_multi_asic': { 'args': [], 'rc': 2, diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index a60ba8c81f..11415e8727 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -336,7 +336,7 @@ 3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 -""" +""" # noqa: E501 class TestBgpCommandsSingleAsic(object): diff --git a/tests/bgp_input/assert_show_output.py b/tests/bgp_input/assert_show_output.py new file mode 100644 index 0000000000..3671c3ce5f --- /dev/null +++ b/tests/bgp_input/assert_show_output.py @@ -0,0 +1,55 @@ +""" +Module holding the correct values for show CLI command outputs for the bgp_test.py +""" + +show_device_global_empty = """\ +No configuration is present in CONFIG DB +""" + +show_device_global_all_disabled = """\ +TSA W-ECMP +-------- -------- +disabled disabled +""" +show_device_global_all_disabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_all_enabled = """\ +TSA W-ECMP +------- -------- +enabled enabled +""" +show_device_global_all_enabled_json = """\ +{ + "tsa": 
"enabled", + "w-ecmp": "enabled" +} +""" + +show_device_global_tsa_enabled = """\ +TSA W-ECMP +------- -------- +enabled disabled +""" +show_device_global_tsa_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_wcmp_enabled = """\ +TSA W-ECMP +-------- -------- +disabled enabled +""" +show_device_global_wcmp_enabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "enabled" +} +""" diff --git a/tests/bgp_input/mock_config/all_disabled.json b/tests/bgp_input/mock_config/all_disabled.json new file mode 100644 index 0000000000..30a929c7b7 --- /dev/null +++ b/tests/bgp_input/mock_config/all_disabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/all_enabled.json b/tests/bgp_input/mock_config/all_enabled.json new file mode 100644 index 0000000000..eab39897bb --- /dev/null +++ b/tests/bgp_input/mock_config/all_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_input/mock_config/empty.json b/tests/bgp_input/mock_config/empty.json new file mode 100644 index 0000000000..e77dd4d79e --- /dev/null +++ b/tests/bgp_input/mock_config/empty.json @@ -0,0 +1,5 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "NULL": "NULL" + } +} diff --git a/tests/bgp_input/mock_config/tsa_enabled.json b/tests/bgp_input/mock_config/tsa_enabled.json new file mode 100644 index 0000000000..9c72a5f79d --- /dev/null +++ b/tests/bgp_input/mock_config/tsa_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/wcmp_enabled.json b/tests/bgp_input/mock_config/wcmp_enabled.json new file mode 100644 index 0000000000..fddc76b618 --- /dev/null +++ b/tests/bgp_input/mock_config/wcmp_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_test.py b/tests/bgp_test.py new file mode 100644 index 0000000000..d64d0b9eea --- /dev/null +++ b/tests/bgp_test.py @@ -0,0 +1,130 @@ +import pytest +import os +import logging +import show.main as show +import config.main as config + +from click.testing import CliRunner +from utilities_common.db import Db +from .mock_tables import dbconnector +from .bgp_input import assert_show_output + + +test_path = os.path.dirname(os.path.abspath(__file__)) +input_path = os.path.join(test_path, "bgp_input") +mock_config_path = os.path.join(input_path, "mock_config") + +logger = logging.getLogger(__name__) + + +SUCCESS = 0 + + +class TestBgp: + @classmethod + def setup_class(cls): + logger.info("Setup class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + @classmethod + def teardown_class(cls): + logger.info("Teardown class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs.clear() + + # ---------- CONFIG BGP ---------- # + + @pytest.mark.parametrize( + "feature", [ + "tsa", + "w-ecmp" + ] + ) + @pytest.mark.parametrize( + "state", [ + "enabled", + "disabled" + ] + ) + def test_config_device_global(self, feature, state): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["bgp"].commands["device-global"]. 
+ commands[feature].commands[state], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.exit_code == SUCCESS + + # ---------- SHOW BGP ---------- # + + @pytest.mark.parametrize( + "cfgdb,output", [ + pytest.param( + os.path.join(mock_config_path, "empty"), + { + "plain": assert_show_output.show_device_global_empty, + "json": assert_show_output.show_device_global_empty + }, + id="empty" + ), + pytest.param( + os.path.join(mock_config_path, "all_disabled"), + { + "plain": assert_show_output.show_device_global_all_disabled, + "json": assert_show_output.show_device_global_all_disabled_json + }, + id="all-disabled" + ), + pytest.param( + os.path.join(mock_config_path, "all_enabled"), + { + "plain": assert_show_output.show_device_global_all_enabled, + "json": assert_show_output.show_device_global_all_enabled_json + }, + id="all-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "tsa_enabled"), + { + "plain": assert_show_output.show_device_global_tsa_enabled, + "json": assert_show_output.show_device_global_tsa_enabled_json + }, + id="tsa-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "wcmp_enabled"), + { + "plain": assert_show_output.show_device_global_wcmp_enabled, + "json": assert_show_output.show_device_global_wcmp_enabled_json + }, + id="w-ecmp-enabled" + ) + ] + ) + @pytest.mark.parametrize( + "format", [ + "plain", + "json", + ] + ) + def test_show_device_global(self, cfgdb, output, format): + dbconnector.dedicated_dbs["CONFIG_DB"] = cfgdb + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["bgp"].commands["device-global"], + [] if format == "plain" else ["--json"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.output == output[format] + assert result.exit_code == SUCCESS diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py old mode 100644 new mode 100755 index 940e30c04b..681e3d2c13 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -7,6 +7,8 @@ import tests.mock_tables.dbconnector from utilities_common.db import Db from .utils import get_result_and_return_code +from unittest import mock +sys.modules['clicommon'] = mock.Mock() show_linecard0_shutdown_output="""\ LINE-CARD0 line-card 1 Empty down LC1000101 @@ -15,6 +17,15 @@ show_linecard0_startup_output="""\ LINE-CARD0 line-card 1 Empty up LC1000101 """ + +show_fabriccard0_shutdown_output = """\ +FABRIC-CARD0 fabric-card 17 Online down FC1000101 +""" + +show_fabriccard0_startup_output = """\ +FABRIC-CARD0 fabric-card 17 Online up FC1000101 +""" + header_lines = 2 warning_lines = 0 @@ -113,6 +124,11 @@ Linecard4|Asic2|PortChannel0001 2 22 Linecard4|Asic2|Ethernet29, Linecard4|Asic2|Ethernet30 """ + +def mock_run_command_side_effect(*args, **kwargs): + return '', 0 + + class TestChassisModules(object): @classmethod def setup_class(cls): @@ -186,6 +202,47 @@ def test_config_shutdown_module(self): #db.cfgdb.set_entry("CHASSIS_MODULE", "LINE-CARD0", { "admin_status" : "down" }) #db.get_data("CHASSIS_MODULE", "LINE-CARD0") + def test_config_shutdown_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", 
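+        # Both asic6 and asic7 are registered in CHASSIS_FABRIC_ASIC_TABLE under the
+        # same module name FABRIC-CARD0, so a module-level shutdown is expected to
+        # act on each fabric ASIC entry that maps to the card.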
"CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + header_lines = 2 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_shutdown_output.strip('\n') + + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 6 + def test_config_startup_module(self): runner = CliRunner() db = Db() @@ -202,6 +259,62 @@ def test_config_startup_module(self): result_out = " ".join((result_lines[header_lines]).split()) assert result_out.strip('\n') == show_linecard0_startup_output.strip('\n') + def test_config_startup_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + # FC is down and doing startup + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + + # FC is up and doing startup + fvs = {'admin_status': 'up'} + db.cfgdb.set_entry('CHASSIS_MODULE', 
"FABRIC-CARD0", fvs) + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + def test_config_incorrect_module(self): runner = CliRunner() db = Db() diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 58a24dc958..0a3d99cbcd 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -350,7 +350,7 @@ def test_config_breakout_extra_table_warning(self, breakout_cfg_file, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Below Config can not be verified' in result.output assert 'UNKNOWN_TABLE' in result.output assert 'Do you wish to Continue?' in result.output @@ -396,7 +396,7 @@ def test_config_breakout_verbose(self, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output # verbose must be set while creating instance of ConfigMgmt class @@ -538,7 +538,7 @@ def config_dpb_port8_2x50G_1x100G(): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v','-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' 
in result.output assert 'Printing dependencies' in result.output assert 'NO-NSW-PACL-V4' in result.output diff --git a/tests/config_save_output/all_config_db.json b/tests/config_save_output/all_config_db.json new file mode 100644 index 0000000000..17c3e7fc6c --- /dev/null +++ b/tests/config_save_output/all_config_db.json @@ -0,0 +1,5 @@ +{ + "localhost": {}, + "asic0": {}, + "asic1": {} +} \ No newline at end of file diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 76f5675690..25c54d36ec 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -877,6 +877,34 @@ def test_config_snmp_community_add_new_community_with_invalid_type_yang_validati assert result.exit_code != 0 assert 'SNMP community configuration failed' in result.output + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1%eth0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_linklocal(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["fe80::1%eth0"], obj=obj) + assert ('fe80::1%eth0', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "fe80::1%eth0||") == {} + + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_ipv4(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["10.1.0.32"], obj=obj) + assert ('10.1.0.32', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "10.1.0.32||") == {} + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/config_test.py b/tests/config_test.py index 1054a52a33..74bc0e1093 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,3 +1,5 @@ +import copy +import datetime import pytest import filecmp import importlib @@ -16,6 +18,7 @@ from click.testing import CliRunner from sonic_py_common import device_info, multi_asic +from utilities_common import flock from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -43,6 +46,23 @@ load_minigraph_platform_false_path = os.path.join(load_minigraph_input_path, "platform_false") load_minigraph_command_output="""\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db +Running command: config qos reload --no-dynamic-buffer --no-delay +Running command: pfcwd start_default +Restarting SONiC target ... +Reloading Monit configuration ... +Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} +""" + +load_minigraph_lock_failure_output = """\ +Failed to acquire lock on {0} +""" + +load_minigraph_command_bypass_lock_output = """\ +Bypass lock on {} Stopping SONiC target ... 
Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -53,6 +73,7 @@ """ load_minigraph_platform_plugin_command_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -61,6 +82,7 @@ Restarting SONiC target ... Reloading Monit configuration ... Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} """ load_mgmt_config_command_ipv4_only_output="""\ @@ -135,6 +157,20 @@ """ RELOAD_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT = """\ +Failed to acquire lock on {0} +""" + +RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT = """\ +Bypass lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... @@ -142,31 +178,114 @@ """ RELOAD_YANG_CFG_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ RELOAD_MASIC_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_with_sys_info_command_output="""\ +Acquired lock on {0} Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db""" reload_config_with_disabled_service_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} +""" + +reload_config_masic_onefile_output = """\ +Acquired lock on {0} +Stopping SONiC target ... +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +reload_config_masic_onefile_gen_sysinfo_output = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... 
+Released lock on {0} +""" + +save_config_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +""" + +save_config_filename_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /tmp/config_db.json +""" + +save_config_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > /etc/sonic/config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > /etc/sonic/config_db1.json """ +save_config_filename_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > config_db1.json +""" + +save_config_onefile_masic_output = """\ +Integrate each ASIC's config into a single JSON file /tmp/all_config_db.json. +""" + +config_temp = { + "scope": { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + } + def mock_run_command_side_effect(*args, **kwargs): command = args[0] if isinstance(command, str): @@ -186,6 +305,10 @@ def mock_run_command_side_effect(*args, **kwargs): return 'enabled', 0 elif command == 'cat /var/run/dhclient.eth0.pid': return '101', 0 + elif command == 'sudo systemctl show --no-pager interfaces-config -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 + elif command == 'sudo systemctl show --no-pager networking -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 else: return '', 0 @@ -299,6 +422,191 @@ def test_plattform_fw_update(self, mock_check_call): assert result.exit_code == 0 mock_check_call.assert_called_with(["fwutil", "update", 'update', 'module', 'Module1', 'component', 'BIOS', 'fw']) + +class TestConfigSave(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + importlib.reload(config.main) + + def test_config_save(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + (config, show) = get_cmd_module + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_output + + def test_config_save_filename(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + 
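+                        # run_command is stubbed with mock_run_command_side_effect so the
+                        # save path runs without invoking sonic-cfggen; the test only
+                        # compares the printed command lines against the expected output.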
mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + (config, show) = get_cmd_module + + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "config_db.json") + result = runner.invoke(config.config.commands["save"], ["-y", output_file]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + +class TestConfigSaveMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_save_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_masic_output + + def test_config_save_filename_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json,config_db1.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_masic_output + + def test_config_save_filename_wrong_cnt_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert "Input 3 config file(s) separated by comma for multiple files" in result.output + + def test_config_save_onefile_masic(self): + def get_config_side_effect(): + return {} + + with mock.patch('swsscommon.swsscommon.ConfigDBConnector.get_config', + mock.MagicMock(side_effect=get_config_side_effect)): + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "all_config_db.json") + print("Saving output in {}".format(output_file)) + try: + 
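+                # Drop any stale file left over from a previous run so the
+                # filecmp.cmp check below compares freshly generated content only.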
os.remove(output_file) + except OSError: + pass + result = runner.invoke( + config.config.commands["save"], + ["-y", output_file] + ) + + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_onefile_masic_output + + cwd = os.path.dirname(os.path.realpath(__file__)) + expected_result = os.path.join( + cwd, "config_save_output", "all_config_db.json" + ) + assert filecmp.cmp(output_file, expected_result, shallow=False) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestConfigReload(object): dummy_cfg_file = os.path.join(os.sep, "tmp", "config.json") @@ -338,7 +646,8 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_stdin(self, get_cmd_module, setup_single_broadcom_asic): def mock_json_load(f): @@ -378,7 +687,8 @@ def mock_json_load(f): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): @@ -392,6 +702,212 @@ def teardown_class(cls): dbconnector.load_namespace_config() +class TestConfigReloadMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_reload_onefile_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "mac": "1d:34:db:16:a6:00", + "platform": "x86_64-mlnx_msn3800-r0", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:05", + "platform": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + 
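+                        # The mocked file mirrors the single-file layout produced by
+                        # "config save" on a multi-ASIC device: one top-level key per
+                        # namespace (localhost, asic0, asic1).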
"cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:06", + "platform": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == \ + reload_config_masic_onefile_output.format(config.SYSTEM_RELOAD_LOCK) + + def test_config_reload_onefile_gen_sysinfo_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join( + [li.rstrip() for li in result.output.split('\n')] + ) == reload_config_masic_onefile_gen_sysinfo_output.format(config.SYSTEM_RELOAD_LOCK) + + def test_config_reload_onefile_bad_format_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": {}, + "asic0": {} + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code != 0 + assert "Input file 
all_config_db.json must contain all asics config" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestLoadMinigraph(object): @classmethod def setup_class(cls): @@ -410,10 +926,57 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): print(result.output) traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert mock_run_command.call_count == 12 + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert result.output == \ + (load_minigraph_lock_failure_output.format(config.SYSTEM_RELOAD_LOCK)) + assert mock_run_command.call_count == 0 + finally: + flock.release_flock(fd) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y", "-b"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert result.output == \ + load_minigraph_command_bypass_lock_output.format(config.SYSTEM_RELOAD_LOCK) + assert mock_run_command.call_count == 12 + finally: + flock.release_flock(fd) @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): @@ -425,10 +988,11 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad print(result.output) traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output + assert "\n".join([line.rstrip() for line in 
result.output.split('\n')]) == \ + (load_minigraph_platform_plugin_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert mock_run_command.call_count == 12 @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, None))) def test_load_minigraph_platform_plugin_fail(self, get_cmd_module, setup_single_broadcom_asic): @@ -519,8 +1083,13 @@ def is_file_side_effect(filename): def test_load_minigraph_with_specified_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "--golden_config_path", "golden_config.json", "-y"]) @@ -531,14 +1100,48 @@ def is_file_side_effect(filename): def test_load_minigraph_with_default_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code == 0 assert "config override-config-table /etc/sonic/golden_config_db.json" in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_hard_dependency_check(self, get_cmd_module): + def is_file_side_effect(filename): + return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + } + + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)), \ + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + (config, _) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exits." 
in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -556,7 +1159,12 @@ def test_load_minigraph_with_traffic_shift_away_with_golden_config(self, get_cmd with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: def is_file_side_effect(filename): return True if 'golden_config' in filename else False - with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + + def read_json_file_side_effect(filename): + return {} + + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module db = Db() golden_config = {} @@ -641,25 +1249,77 @@ def test_reload_config_no_sysinfo(self, get_cmd_module, setup_single_broadcom_as traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): + def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + + def test_reload_config_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) + + def test_reload_config_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): self.add_sysinfo_to_cfg_file() with mock.patch( "utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect) - ) as mock_run_command: + ): (config, show) = get_cmd_module runner = CliRunner() - result = runner.invoke( - config.config.commands["reload"], - [self.dummy_cfg_file, '-y', '-f']) + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) - print(result.exit_code) - print(result.output) - traceback.print_tb(result.exc_info[2]) - assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_CONFIG_DB_OUTPUT + try: + result = runner.invoke( + 
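+                    # '-b' bypasses the reload lock, so the command should succeed even
+                    # though this test still holds SYSTEM_RELOAD_LOCK via acquire_flock.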
config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f', '-b']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): self.add_sysinfo_to_cfg_file() @@ -679,7 +1339,8 @@ def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broad assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + reload_config_with_disabled_service_output.format(config.SYSTEM_RELOAD_LOCK) def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): self.add_sysinfo_to_cfg_file() @@ -703,7 +1364,7 @@ def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_MASIC_CONFIG_DB_OUTPUT + == RELOAD_MASIC_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) def test_reload_yang_config(self, get_cmd_module, setup_single_broadcom_asic): @@ -722,7 +1383,7 @@ def test_reload_yang_config(self, get_cmd_module, traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_YANG_CFG_OUTPUT + == RELOAD_YANG_CFG_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): @@ -1023,6 +1684,7 @@ def setUp(self): self.any_checkpoints_list = ["checkpoint1", "checkpoint2", "checkpoint3"] self.any_checkpoints_list_as_text = json.dumps(self.any_checkpoints_list, indent=4) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__no_params__get_required_params_error_msg(self): # Arrange unexpected_exit_code = 0 @@ -1035,6 +1697,7 @@ def test_apply_patch__no_params__get_required_params_error_msg(self): self.assertNotEqual(unexpected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__help__gets_help_msg(self): # Arrange expected_exit_code = 0 @@ -1047,6 +1710,7 @@ def test_apply_patch__help__gets_help_msg(self): self.assertEqual(expected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__only_required_params__default_values_used_for_optional_params(self): # Arrange expected_exit_code = 0 @@ -1065,6 +1729,7 @@ def test_apply_patch__only_required_params__default_values_used_for_optional_par mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_default_values]) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__all_optional_params_non_default__non_default_values_used(self): # Arrange expected_exit_code = 0 @@ -1094,6 +1759,7 @@ def test_apply_patch__all_optional_params_non_default__non_default_values_used(s mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_non_default_values]) + 
@patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__exception_thrown__error_displayed_error_code_returned(self): # Arrange unexpected_exit_code = 0 @@ -1129,6 +1795,7 @@ def test_apply_patch__optional_parameters_passed_correctly(self): ["--ignore-path", "/ANY_TABLE"], mock.call(self.any_patch, ConfigFormat.CONFIGDB, False, False, False, ("/ANY_TABLE",))) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def validate_apply_patch_optional_parameter(self, param_args, expected_call): # Arrange expected_exit_code = 0 @@ -2651,6 +3318,7 @@ def setUp(self): self.runner = CliRunner() self.patch_file_path = 'path/to/patch.json' + self.replace_file_path = 'path/to/replace.json' self.patch_content = [ { "op": "add", @@ -2679,6 +3347,16 @@ def setUp(self): } ] + test_config = copy.deepcopy(config_temp) + data = test_config.pop("scope") + self.all_config = {} + self.all_config["localhost"] = data + self.all_config["asic0"] = data + self.all_config["asic0"]["bgpraw"] = "" + self.all_config["asic1"] = data + self.all_config["asic1"]["bgpraw"] = "" + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: @@ -2698,7 +3376,201 @@ def test_apply_patch_multiasic(self): # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_dryrun_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + 
mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.apply_patch_wrapper') + def test_apply_patch_check_apply_call_parallel_multiasic(self, mock_apply_patch): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + 
with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + self.assertEqual(mock_apply_patch.call_count, + multi_asic.get_num_asics() + 1, + "apply_patch_wrapper function should be called number of ASICs plus host times") + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_not_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_not_called() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + def test_apply_patch_parallel_with_error_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: # Mock GenericUpdater to avoid actual patch application @@ -2713,12 +3585,13 @@ def test_apply_patch_dryrun_multiasic(self): result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path, "--format", ConfigFormat.SONICYANG.name, - "--dry-run", - "--ignore-non-yang-tables", - "--ignore-path", "/ANY_TABLE", - "--ignore-path", 
"/ANY_OTHER_TABLE/ANY_FIELD", - "--ignore-path", "", - "--verbose"], + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], catch_exceptions=False) print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) @@ -2732,6 +3605,269 @@ def test_apply_patch_dryrun_multiasic(self): # Ensure ConfigDBConnector was never instantiated or called mock_config_db_connector.assert_not_called() + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_validate_patch_multiasic(self, mock_subprocess_popen): + mock_instance = MagicMock() + mock_instance.communicate.return_value = (json.dumps(self.all_config), 0) + mock_subprocess_popen.return_value = mock_instance + + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed.") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_validate_patch_with_badpath_multiasic(self, mock_subprocess_popen): + mock_instance = MagicMock() + mock_instance.communicate.return_value = (json.dumps(self.all_config), 0) + mock_subprocess_popen.return_value = mock_instance + + bad_patch = copy.deepcopy(self.patch_content) + bad_patch.append({ + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet3", "Ethernet4"], + "stage": "ingress", + "type": "L3" + } + }) + + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertNotEqual(result.exit_code, 0, "Command should failed.") + self.assertIn("Failed to apply patch", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_parallel_badpath_multiasic(self, 
+    @patch('config.main.subprocess.Popen')
+    @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+    def test_apply_patch_parallel_badpath_multiasic(self, mock_subprocess_popen):
+        mock_instance = MagicMock()
+        mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+        mock_subprocess_popen.return_value = mock_instance
+
+        bad_patch = copy.deepcopy(self.patch_content)
+        bad_patch.append({
+            "value": {
+                "policy_desc": "New ACL Table",
+                "ports": ["Ethernet3", "Ethernet4"],
+                "stage": "ingress",
+                "type": "L3"
+            }
+        })
+
+        # Mock open to simulate file reading
+        with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.apply_patch = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["apply-patch"],
+                                            [self.patch_file_path,
+                                             "--parallel"],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+                self.assertIn("Failed to apply patch", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('config.main.subprocess.Popen')
+    @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+    def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subprocess_popen):
+        mock_instance = MagicMock()
+        mock_instance.communicate.return_value = (json.dumps(self.all_config), 2)
+        mock_subprocess_popen.return_value = mock_instance
+
+        # Mock open to simulate file reading
+        with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.apply_patch = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["apply-patch"],
+                                            [self.patch_file_path],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+                self.assertIn("Failed to apply patch", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+    def test_replace_multiasic(self):
+        # Mock open to simulate file reading
+        mock_replace_content = copy.deepcopy(self.all_config)
+        with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual config replacement
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.replace_all = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["replace"],
+                                            [self.replace_file_path],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertEqual(result.exit_code, 0, "Command should succeed")
+                self.assertIn("Config replaced successfully.", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.replace_file_path, 'r')
+
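+    # NOTE: a multi-ASIC replace payload is keyed by scope, "localhost" plus
+    # one entry per ASIC namespace ("asic0", "asic1", ...), as built in setUp();
+    # the next test drops "asic0" from the payload to verify that a missing
+    # scope fails the whole replace instead of being silently skipped.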
+ self.assertIn("Config replaced successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.replace_file_path, 'r') + + @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock()) + def test_replace_multiasic_missing_scope(self): + # Mock open to simulate file reading + mock_replace_content = copy.deepcopy(self.all_config) + mock_replace_content.pop("asic0") + with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True): + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["replace"], + [self.replace_file_path], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertNotEqual(result.exit_code, 0, "Command should failed") + self.assertIn("Failed to replace config", result.output) + + @patch('generic_config_updater.generic_updater.subprocess.Popen') + @patch('generic_config_updater.generic_updater.Util.ensure_checkpoints_dir_exists', mock.Mock(return_value=True)) + @patch('generic_config_updater.generic_updater.Util.save_json_file', MagicMock()) + def test_checkpoint_multiasic(self, mock_subprocess_popen): + allconfigs = copy.deepcopy(self.all_config) + + # Create mock instances for each subprocess call + mock_instance_localhost = MagicMock() + mock_instance_localhost.communicate.return_value = (json.dumps(allconfigs["localhost"]), 0) + mock_instance_localhost.returncode = 0 + + mock_instance_asic0 = MagicMock() + mock_instance_asic0.communicate.return_value = (json.dumps(allconfigs["asic0"]), 0) + mock_instance_asic0.returncode = 0 + + mock_instance_asic1 = MagicMock() + mock_instance_asic1.communicate.return_value = (json.dumps(allconfigs["asic1"]), 0) + mock_instance_asic1.returncode = 0 + + # Setup side effect to return different mock instances based on input arguments + def side_effect(*args, **kwargs): + if "asic" not in args[0]: + return mock_instance_localhost + elif "asic0" in args[0]: + return mock_instance_asic0 + elif "asic1" in args[0]: + return mock_instance_asic1 + else: + return MagicMock() # Default case + + mock_subprocess_popen.side_effect = side_effect + + checkpointname = "checkpointname" + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint created successfully.", result.output) + + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.get_checkpoint_content') + def test_rollback_multiasic(self, mock_get_checkpoint_content): + mock_get_checkpoint_content.return_value = copy.deepcopy(self.all_config) + checkpointname = "checkpointname" + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["rollback"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: 
{}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Config rolled back successfully.", result.output) + + @patch('generic_config_updater.generic_updater.Util.checkpoints_dir_exist', mock.Mock(return_value=True)) + @patch('generic_config_updater.generic_updater.Util.get_checkpoint_names', + mock.Mock(return_value=["checkpointname"])) + def test_list_checkpoint_multiasic(self): + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["list-checkpoints"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("checkpointname", result.output) + + @patch('generic_config_updater.generic_updater.Util.delete_checkpoint', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + def test_delete_checkpoint_multiasic(self): + checkpointname = "checkpointname" + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.delete_checkpoint = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint deleted successfully.", result.output) + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -2741,4 +3877,4 @@ def teardown_class(cls): from .mock_tables import dbconnector from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) - dbconnector.load_database_config() \ No newline at end of file + dbconnector.load_database_config() diff --git a/tests/conftest.py b/tests/conftest.py index 72b28515bb..5dd31d523a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -386,6 +386,13 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1( else: return "" + def mock_multi_asic_list(): + return ["asic0", "asic1"] + + # mock multi-asic list + if request.param == "bgp_v4_network_all_asic": + multi_asic.get_namespace_list = mock_multi_asic_list + _old_run_bgp_command = bgp_util.run_bgp_command if request.param == 'ip_route_for_int_ip': bgp_util.run_bgp_command = mock_run_bgp_command_for_static diff --git a/tests/console_mock/dev/ttyACM1 b/tests/console_mock/dev/ttyACM1 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/console_mock/dev/ttyUSB0 b/tests/console_mock/dev/ttyUSB0 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/console_test.py b/tests/console_test.py index 528f5f4ba8..4a52a3c52e 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -14,10 +14,15 @@ from click.testing import CliRunner from utilities_common.db import Db -from consutil.lib import * +from consutil.lib import ConsolePortProvider, ConsolePortInfo, ConsoleSession, SysInfoProvider, DbUtils, \ + InvalidConfigurationError, LineBusyError, LineNotFoundError, ConnectionFailedError from sonic_py_common import device_info from jsonpatch import 
JsonPatchConflict +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +CONSOLE_MOCK_DIR = SCRIPT_DIR + "/console_mock" + + class TestConfigConsoleCommands(object): @classmethod def setup_class(cls): @@ -543,17 +548,15 @@ def test_sys_info_provider_init_device_prefix_plugin(self): with mock.patch("builtins.open", mock.mock_open(read_data="C0-")): SysInfoProvider.init_device_prefix() assert SysInfoProvider.DEVICE_PREFIX == "/dev/C0-" - SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("/dev/ttyUSB0\n/dev/ttyACM1", ""))) def test_sys_info_provider_list_console_ttys(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev/ttyUSB" ttys = SysInfoProvider.list_console_ttys() print(SysInfoProvider.DEVICE_PREFIX) assert len(ttys) == 1 - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("", "ls: cannot access '/dev/ttyUSB*': No such file or directory"))) def test_sys_info_provider_list_console_ttys_device_not_exists(self): + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev_not_exist/ttyUSB" ttys = SysInfoProvider.list_console_ttys() assert len(ttys) == 0 @@ -563,7 +566,7 @@ def test_sys_info_provider_list_console_ttys_device_not_exists(self): """ @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=all_active_processes_output)) def test_sys_info_provider_list_active_console_processes(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" procs = SysInfoProvider.list_active_console_processes() assert len(procs) == 1 assert "0" in procs @@ -572,7 +575,7 @@ def test_sys_info_provider_list_active_console_processes(self): active_process_output = "13751 Wed Mar 6 08:31:35 2019 /usr/bin/sudo picocom -b 9600 -f n /dev/ttyUSB1" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_output)) def test_sys_info_provider_get_active_console_process_info_exists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("13751") assert proc is not None assert proc == ("1", "13751", "Wed Mar 6 08:31:35 2019") @@ -580,7 +583,7 @@ def test_sys_info_provider_get_active_console_process_info_exists(self): active_process_empty_output = "" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_empty_output)) def test_sys_info_provider_get_active_console_process_info_nonexists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("2") assert proc is None diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e21539766a..cdf4251bd7 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -74,24 +74,27 @@ class TestVersionComparison(object): def setup_class(cls): cls.version_comp_list = [ # Old format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_1_0_2', 'result' : False }, - { 'v1' : 'version_1_0_2', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_2_0_1', 'result' : False }, - { 'v1' : 'version_2_0_1', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_1_0_2', 'result': False}, + {'v1': 'version_1_0_2', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 
'version_1_0_1', 'v2': 'version_2_0_1', 'result': False}, + {'v1': 'version_2_0_1', 'v2': 'version_1_0_1', 'result': True}, # New format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_1_0_1', 'result': True}, # New format v.s new format - { 'v1' : 'version_202311_01', 'v2' : 'version_202311_02', 'result' : False }, - { 'v1' : 'version_202311_02', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_202305_01', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_202305_01', 'result' : True }, - { 'v1' : 'version_202311_01', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_master_01', 'v2' : 'version_master_02', 'result' : False }, - { 'v1' : 'version_master_02', 'v2' : 'version_master_01', 'result' : True }, + {'v1': 'version_202311_01', 'v2': 'version_202311_02', 'result': False}, + {'v1': 'version_202311_02', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_202305_01', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_202305_01', 'result': True}, + {'v1': 'version_202405_01', 'v2': 'version_202411_01', 'result': False}, + {'v1': 'version_202411_01', 'v2': 'version_202405_01', 'result': True}, + {'v1': 'version_202411_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_master_01', 'v2': 'version_master_02', 'result': False}, + {'v1': 'version_master_02', 'v2': 'version_master_01', 'result': True}, ] def test_version_comparison(self): @@ -383,7 +386,7 @@ def test_dns_nameserver_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'dns-nameserver-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_keys = dbmgtr.configDB.keys(dbmgtr.configDB.CONFIG_DB, 'DNS_NAMESERVER*') expected_keys = expected_db.cfgdb.keys(expected_db.cfgdb.CONFIG_DB, 'DNS_NAMESERVER*') @@ -895,7 +898,7 @@ def test_init(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) def test_init_no_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace=None, operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -903,7 +906,7 @@ def test_init_no_namespace(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) 
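+    # NOTE: the namespace variant below drives db_migrator.main() with
+    # namespace="asic0"; SonicDBConfig.isGlobalInit/initializeGlobalConfig are
+    # mocked so the migrator can set up the multi-DB global config without a
+    # real redis instance. The operation string tracks the current latest
+    # schema version, bumped here from version_202405_01 to version_202411_01.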
@mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) def test_init_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace="asic0", operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -940,7 +943,7 @@ def test_dns_nameserver_migrator_minigraph(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-minigraph-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") @@ -956,7 +959,7 @@ def test_dns_nameserver_migrator_configdb(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-configdb-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index 82b8b16ff6..ac541b05b9 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -1,7 +1,6 @@ import sys import syslog from unittest.mock import patch -import pytest import subprocess sys.path.append("scripts") @@ -178,3 +177,7 @@ def test_readonly(self, mock_proc, mock_log): assert max_log_lvl == syslog.LOG_ERR + @classmethod + def teardown_class(cls): + subprocess.run("rm -rf /tmp/tmp*", shell=True) # cleanup the temporary dirs + print("TEARDOWN") diff --git a/tests/drops_group_test.py b/tests/drops_group_test.py index d374275a48..93f99e3f1b 100644 --- a/tests/drops_group_test.py +++ b/tests/drops_group_test.py @@ -3,6 +3,7 @@ import shutil from click.testing import CliRunner +from utilities_common.cli import UserCache test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -20,13 +21,13 @@ SWITCH_EGRESS_DROPS 2 PORT_INGRESS_DROPS - IP_HEADER_ERROR - NO_L3_HEADER + IP_HEADER_ERROR + NO_L3_HEADER SWITCH_EGRESS_DROPS - ACL_ANY - L2_ANY - L3_ANY + ACL_ANY + L2_ANY + L3_ANY """ expected_counter_configuration = """\ @@ -56,6 +57,21 @@ sonic_drops_test 1000 0 """ +expected_counts_voq = """\ + SWITCH-ID PKT_INTEGRITY_ERR +---------------- ------------------- +sonic_drops_test 500 + + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 10 100 0 0 80 20 +Ethernet4 N/A 0 1000 0 0 800 100 +Ethernet8 N/A 100 10 0 0 10 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 1000 0 +""" expected_counts_with_group = """ DEVICE SWITCH_DROPS ---------------- -------------- @@ -82,14 +98,17 @@ sonic_drops_test 0 0 """ -dropstat_path = "/tmp/dropstat-27" + +def remove_tmp_dropstat_file(): + # remove the tmp portstat + cache = UserCache("dropstat") + cache.remove_all() class TestDropCounters(object): @classmethod def setup_class(cls): print("SETUP") - if os.path.exists(dropstat_path): - shutil.rmtree(dropstat_path) + 
remove_tmp_dropstat_file() os.environ["PATH"] += os.pathsep + scripts_path os.environ["UTILITIES_UNIT_TESTING"] = "1" @@ -117,6 +136,14 @@ def test_show_counts(self): print(result.output) assert result.output == expected_counts + def test_show_counts_voq(self): + runner = CliRunner() + os.environ["VOQ_DROP_COUNTER_TESTING"] = "1" + result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], []) + os.environ["VOQ_DROP_COUNTER_TESTING"] = "0" + print(result.output) + assert result.output == expected_counts_voq + def test_show_counts_with_group(self): runner = CliRunner() result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], ["-g", "PACKET_DROPS"]) diff --git a/tests/ecn_input/ecn_test_vectors.py b/tests/ecn_input/ecn_test_vectors.py index c53bf48a24..fe47f0b7a3 100644 --- a/tests/ecn_input/ecn_test_vectors.py +++ b/tests/ecn_input/ecn_test_vectors.py @@ -18,205 +18,356 @@ """ +ecn_show_config_output_specific_namespace = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +""" + +ecn_show_config_output_multi = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +Profile: AZURE_LOSSY +----------------------- ----- +red_max_threshold 32760 +red_min_threshold 4095 +yellow_max_threshold 32760 +yellow_min_threshold 4095 +green_max_threshold 32760 +green_min_threshold 4095 +yellow_drop_probability 2 +----------------------- ----- + +""" + testData = { - 'ecn_show_config' : {'cmd' : ['show'], - 'args' : [], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'ecn_show_config': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': ecn_show_config_output }, - 'ecn_show_config_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-l', '-vv'], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' + 'ecn_show_config_verbose': {'cmd': ['q_cmd'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' }, - 'ecn_cfg_gmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'] + 'ecn_cfg_gmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'] }, - 'ecn_cfg_gmin_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\nSetting green_min_threshold value to 1048600\n' + 'ecn_cfg_gmin_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\n' + 'Setting green_min_threshold value to 1048600\n') }, - 'ecn_cfg_gmax' : 
{'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_max_threshold,2097153'] + 'ecn_cfg_gmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_ymin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_min_threshold,1048600'] + 'ecn_cfg_ymin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_min_threshold,1048600'] }, - 'ecn_cfg_ymax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_max_threshold,2097153'] + 'ecn_cfg_ymax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_max_threshold,2097153'] }, - 'ecn_cfg_rmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_min_threshold,1048600'] + 'ecn_cfg_rmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_min_threshold,1048600'] }, - 'ecn_cfg_rmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_max_threshold,2097153'] + 'ecn_cfg_rmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_max_threshold,2097153'] }, - 'ecn_cfg_rdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_drop_probability,10'] + 'ecn_cfg_rdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_drop_probability,10'] }, - 'ecn_cfg_ydrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_drop_probability,11'] + 'ecn_cfg_ydrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_drop_probability,11'] }, - 'ecn_cfg_gdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'] + 'ecn_cfg_gdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'] }, - 'ecn_cfg_gdrop_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\nSetting green_drop_probability value to 12%\n' + 'ecn_cfg_gdrop_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\n' + 'Setting green_drop_probability value to 12%\n') }, - 'ecn_cfg_multi_set' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : 
['AZURE_LOSSLESS,green_drop_probability,12', - 'AZURE_LOSSLESS,green_max_threshold,2097153' - ] + 'ecn_cfg_multi_set': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12', + ',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_gmin_gmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153', '-gmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid gmin (2097154) and gmax (2097153). gmin should be smaller than gmax' + 'ecn_cfg_gmin_gmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', + '2097153', '-gmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid gmin (2097154) and gmax (2097153).' + ' gmin should be smaller than gmax') }, - 'ecn_cfg_ymin_ymax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153', '-ymin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid ymin (2097154) and ymax (2097153). ymin should be smaller than ymax' + 'ecn_cfg_ymin_ymax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', + '2097153', '-ymin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid ymin (2097154) and ymax (2097153).' + ' ymin should be smaller than ymax') }, - 'ecn_cfg_rmin_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153', '-rmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmin (2097154) and rmax (2097153). rmin should be smaller than rmax' + 'ecn_cfg_rmin_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', + '2097153', '-rmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid rmin (2097154) and rmax (2097153).' + ' rmin should be smaller than rmax') }, - 'ecn_cfg_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmax (-2097153). rmax should be an non-negative integer' - }, - 'ecn_cfg_rdrop_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], - 'rc' : 1, - 'rc_msg' : 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + 'ecn_cfg_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], + 'rc': 1, + 'rc_msg': 'Invalid rmax (-2097153). 
rmax should be an non-negative integer' }, - 'ecn_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_rdrop_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], + 'rc': 1, + 'rc_msg': 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + }, + 'ecn_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_q_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_lossy_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '2'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 2: off\n', - 'cmp_args' : [None], - 'cmp_q_args' : ['2'] + 'ecn_lossy_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '2'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 2: off\n', + 'cmp_args': [',None,None'], + 'cmp_q_args': ['2'] }, - 'ecn_q_all_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_all_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_q_all_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\nqueue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_q_all_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\nqueue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'] - }, - 'ecn_cfg_q_all_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' + 'ecn_cfg_q_all_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'] + }, + 'ecn_cfg_q_all_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' }, - 'ecn_cfg_q_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'] + 'ecn_cfg_q_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': 
['3'], + 'other_q': ['4'] }, - 'ecn_cfg_q_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3' + 'ecn_cfg_q_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3' }, - 'ecn_cfg_q_all_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_all_on': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' + 'ecn_cfg_q_all_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' }, - 'ecn_cfg_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 4' + 'ecn_cfg_q_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 4' }, - 'ecn_cfg_lossy_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '0,1,2,5,6,7', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['0', '1', '2', '5', '6', '7'] - } + 'ecn_cfg_lossy_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1,2,5,6,7', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0', '1', '2', '5', '6', '7'] + }, + 'ecn_show_config_masic': {'cmd': ['show_masic'], + 'args': ['-l'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi, + }, + 'test_ecn_show_config_verbose_masic': {'cmd': ['show_masic'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi + 'Total profiles: 2\n', + }, + 'test_ecn_show_config_namespace': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace, + }, + 'test_ecn_show_config_namespace_verbose': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace + + 'Total profiles: 1\n', + }, + 'ecn_cfg_threshold_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-gmax', '35000', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,green_max_threshold,35000'] + }, + 
'ecn_cfg_probability_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-ydrop', '3', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,yellow_drop_probability,3'] + }, + 'ecn_cfg_gdrop_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Setting green_drop_probability value to 12% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 12% ' + 'for namespace asic1\n') + }, + 'ecn_cfg_multi_set_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', + '14', '-gmax', '2097153', '-vv'], + 'rc': 0, + 'cmp_args': [('asic0-asic1,AZURE_LOSSLESS,' + 'green_drop_probability,14'), + ('asic0-asic1,AZURE_LOSSLESS,' + 'green_max_threshold,2097153')], + 'rc_output': ('Setting green_max_threshold value to 2097153 ' + 'for namespace asic0\n' + 'Setting green_max_threshold value to 2097153 ' + 'for namespace asic1\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic1\n') + }, + 'ecn_q_get_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nqueue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'] + }, + 'ecn_q_get_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-vv', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nEthernet4 queue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'], + 'db_table': 'DEVICE_NEIGHBOR' + }, + 'ecn_q_get_all_ns_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nqueue 0: off\n' + 'ECN status for namespace asic1:\nqueue 0: on\n') + }, + 'ecn_q_get_all_ns_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0', '-vv'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nEthernet4 queue 0: off\n' + 'ECN status for namespace asic1:\nEthernet0 queue 0: on\n') + }, + 'ecn_cfg_q_all_ns_off_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1', 'off'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,None,None'], + 'cmp_q_args': ['0', '1'] + }, + 'ecn_cfg_q_one_ns_off_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', 'on', '-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_msg': 'Enable ECN on Ethernet0 queue 1\n', + 'cmp_args': ['asic1,wred_profile,AZURE_LOSSLESS', + 'asic1,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0'], + 'other_q': ['1'] + } } diff --git a/tests/ecn_test.py b/tests/ecn_test.py index 13474b12e8..5d2ac36011 100644 --- a/tests/ecn_test.py +++ b/tests/ecn_test.py @@ -6,11 +6,15 @@ from click.testing import CliRunner import config.main as config -from .ecn_input.ecn_test_vectors import * +from .ecn_input.ecn_test_vectors import testData from .utils import get_result_and_return_code from utilities_common.db import Db import show.main as show +# Constants +ARGS_DELIMITER = ',' +NAMESPACE_DELIMITER = '-' + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") @@ -18,13 +22,107 @@ sys.path.insert(0, modules_path) -class TestEcnConfig(object): +class TestEcnConfigBase(object): @classmethod def setup_class(cls): + print("SETUP") os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") + def 
process_cmp_args(self, cmp_args): + """ + The arguments are a string marked by delimiters + Arguments marked as 'None', are treated as None objects + First arg is always a collection of namespaces + """ + + args = cmp_args.split(ARGS_DELIMITER) + args = [None if arg == "None" else arg for arg in args] + args[0] = args[0].split(NAMESPACE_DELIMITER) + return args + + def verify_profile(self, queue_db_entry, profile, value): + if profile is not None: + assert queue_db_entry[profile] == value + else: + assert profile not in queue_db_entry,\ + "Profile needs to be fully removed from table to propagate NULL OID to SAI" + + def executor(self, input): + runner = CliRunner() + + if 'db_table' in input: + db = Db() + data_list = list(db.cfgdb.get_table(input['db_table'])) + input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) + + if 'show' in input['cmd']: + exec_cmd = show.cli.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + elif 'q_cmd' in input['cmd'] or 'show_masic' in input['cmd'] or 'config_masic' in input['cmd']: + exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) + else: + exec_cmd = config.config.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + if input['rc'] == 0: + assert exit_code == 0 + else: + assert exit_code != 0 + + if 'cmp_args' in input: + fd = open('/tmp/ecnconfig', 'r') + cmp_data = json.load(fd) + + # Verify queue assignments + if 'cmp_q_args' in input: + namespaces, profile, value = self.process_cmp_args(input['cmp_args'][0]) + for namespace in namespaces: + for key in cmp_data[namespace]: + queue_idx = ast.literal_eval(key)[-1] + if queue_idx in input['cmp_q_args']: + self.verify_profile(cmp_data[namespace][key], profile, value) + + # other_q helps verify two different queue assignments + if 'other_q' in input: + namespaces1, profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) + for namespace1 in namespaces1: + for key in cmp_data[namespace1]: + queue_idx = ast.literal_eval(key)[-1] + if 'other_q' in input and queue_idx in input['other_q']: + self.verify_profile(cmp_data[namespace1][key], profile1, value1) + # Verify non-queue related assignments + else: + for args in input['cmp_args']: + namespaces, profile, name, value = self.process_cmp_args(args) + for namespace in namespaces: + assert(cmp_data[namespace][profile][name] == value) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + @classmethod + def teardown_class(cls): + os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + if os.path.isfile('/tmp/ecnconfig'): + os.remove('/tmp/ecnconfig') + print("TEARDOWN") + + +class TestEcnConfig(TestEcnConfigBase): def test_ecn_show_config(self): self.executor(testData['ecn_show_config']) @@ -123,77 +221,3 @@ def test_ecn_queue_set_all_on_verbose(self): def test_ecn_queue_set_lossy_q_on(self): self.executor(testData['ecn_cfg_lossy_q_on']) - - def process_cmp_args(self, cmp_args): - if cmp_args is None: - return (None, None) - return cmp_args.split(',') - - def verify_profile(self, queue_db_entry, profile, value): - if profile != None: - assert queue_db_entry[profile] == value - else: - assert profile not in queue_db_entry,\ - "Profile needs to be fully removed from table to propagate 
NULL OID to SAI" - - def executor(self, input): - runner = CliRunner() - - if 'db_table' in input: - db = Db() - data_list = list(db.cfgdb.get_table(input['db_table'])) - input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) - - if 'show' in input['cmd']: - exec_cmd = show.cli.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - elif 'q_cmd' in input['cmd'] : - exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) - else: - exec_cmd = config.config.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - - print(exit_code) - print(output) - - if input['rc'] == 0: - assert exit_code == 0 - else: - assert exit_code != 0 - - if 'cmp_args' in input: - fd = open('/tmp/ecnconfig', 'r') - cmp_data = json.load(fd) - if 'cmp_q_args' in input: - profile, value = self.process_cmp_args(input['cmp_args'][0]) - if 'other_q' in input: - profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) - for key in cmp_data: - queue_idx = ast.literal_eval(key)[-1] - if queue_idx in input['cmp_q_args']: - self.verify_profile(cmp_data[key], profile, value) - if 'other_q' in input and queue_idx in input['other_q']: - self.verify_profile(cmp_data[key], profile1, value1) - else: - for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) - fd.close() - - if 'rc_msg' in input: - assert input['rc_msg'] in output - - if 'rc_output' in input: - assert output == input['rc_output'] - - @classmethod - def teardown_class(cls): - os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) - os.environ['UTILITIES_UNIT_TESTING'] = "0" - if os.path.isfile('/tmp/ecnconfig'): - os.remove('/tmp/ecnconfig') - print("TEARDOWN") diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index cc4c049806..a8a334cb92 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -200,6 +200,45 @@ 7 0 0 0 """ +multi_asic_fabric_rate = """\ + + ASIC Link ID Rx Data Mbps Tx Data Mbps +------ --------- -------------- -------------- + asic0 0 0 19.8 + asic0 1 0 19.8 + asic0 2 0 39.8 + asic0 3 0 39.8 + asic0 4 0 39.8 + asic0 5 0 39.8 + asic0 6 0 39.3 + asic0 7 0 39.3 + + ASIC Link ID Rx Data Mbps Tx Data Mbps +------ --------- -------------- -------------- + asic1 0 0 0 + asic1 1 0 0 + asic1 2 0 0 + asic1 3 0 0 + asic1 4 0 0 + asic1 5 0 0 + asic1 6 0 0 + asic1 7 0 0 +""" + +multi_asic_fabric_rate_asic0 = """\ + + ASIC Link ID Rx Data Mbps Tx Data Mbps +------ --------- -------------- -------------- + asic0 0 0 19.8 + asic0 1 0 19.8 + asic0 2 0 39.8 + asic0 3 0 39.8 + asic0 4 0 39.8 + asic0 5 0 39.8 + asic0 6 0 39.3 + asic0 7 0 39.3 +""" + class TestFabricStat(object): @classmethod def setup_class(cls): @@ -348,6 +387,20 @@ def test_multi_show_fabric_isolation_asic(self): assert return_code == 0 assert result == multi_asic_fabric_isolation_asic0 + def test_mutli_show_fabric_rate(self): + return_code, result = get_result_and_return_code(['fabricstat', '-s']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_rate + + def test_multi_show_fabric_rate_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-s', '-n', 'asic0']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == 
diff --git a/tests/flock_test.py b/tests/flock_test.py new file mode 100644 index 0000000000..7d9039dd2d --- /dev/null +++ b/tests/flock_test.py
@@ -0,0 +1,187 @@
+import pytest
+import tempfile
+import threading
+import time
+
+from unittest import mock
+from utilities_common import flock
+
+
+f0_exit = threading.Event()
+f1_exit = threading.Event()
+f2_exit = threading.Event()
+
+
+def dummy_f0():
+    while not f0_exit.is_set():
+        time.sleep(1)
+
+
+def dummy_f1(bypass_lock=False):
+    while not f1_exit.is_set():
+        time.sleep(1)
+
+
+def dummy_f2(bypass_lock=True):
+    while not f2_exit.is_set():
+        time.sleep(1)
+
+
+class TestFLock:
+    def setup(self):
+        print("SETUP")
+        f0_exit.clear()
+        f1_exit.clear()
+        f2_exit.clear()
+
+    def test_flock_acquire_lock_non_blocking(self):
+        """Test flock non-blocking acquire lock."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            fd1 = open(fd0.name, "r")
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            assert not flock.acquire_flock(fd1.fileno(), 0)
+
+            flock.release_flock(fd0.fileno())
+
+            assert flock.acquire_flock(fd1.fileno(), 0)
+            flock.release_flock(fd1.fileno())
+
+    def test_flock_acquire_lock_blocking(self):
+        """Test flock blocking acquire."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            fd1 = open(fd0.name, "r")
+            res = []
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            thrd = threading.Thread(target=lambda: res.append(flock.acquire_flock(fd1.fileno(), -1)))
+            thrd.start()
+
+            time.sleep(5)
+            assert thrd.is_alive()
+
+            flock.release_flock(fd0.fileno())
+            thrd.join()
+            assert len(res) == 1 and res[0]
+
+            fd2 = open(fd0.name, "r")
+            assert not flock.acquire_flock(fd2.fileno(), 0)
+
+            flock.release_flock(fd1.fileno())
+            assert flock.acquire_flock(fd2.fileno(), 0)
+            flock.release_flock(fd2.fileno())
+
+    def test_flock_acquire_lock_timeout(self):
+        """Test flock timeout acquire."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            def acquire_helper():
+                nonlocal elapsed
+                start = time.time()
+                res.append(flock.acquire_flock(fd1.fileno(), 5))
+                end = time.time()
+                elapsed = end - start
+
+            fd1 = open(fd0.name, "r")
+            elapsed = 0
+            res = []
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            thrd = threading.Thread(target=acquire_helper)
+            thrd.start()
+
+            thrd.join()
+            assert ((len(res) == 1) and (not res[0]))
+            assert elapsed >= 5
+
+            flock.release_flock(fd0.fileno())
+
+    @mock.patch("click.echo")
+    def test_try_lock(self, mock_echo):
+        """Test try_lock decorator."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            def get_file_content(fd):
+                fd.seek(0)
+                return fd.read()
+
+            f0_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f0)
+            f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1)
+
+            thrd = threading.Thread(target=f0_with_try_lock)
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}")]
+                assert b"dummy_f0" in get_file_content(fd0)
+
+                with pytest.raises(SystemExit):
+                    f1_with_try_lock()
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"),
+                                                    mock.call(f"Failed to acquire lock on {fd0.name}")]
+            finally:
+                f0_exit.set()
+                thrd.join()
+
+            assert b"dummy_f0" not in get_file_content(fd0)
+
+            thrd = threading.Thread(target=f1_with_try_lock)
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"),
+                                                    mock.call(f"Failed to acquire lock on {fd0.name}"),
+                                                    mock.call(f"Released lock on {fd0.name}"),
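The assertions in `tests/flock_test.py` pin down the `utilities_common.flock` contract: `acquire_flock(fd, timeout)` returns a boolean, where a timeout of `0` means a single non-blocking attempt, `-1` blocks indefinitely, and a positive value waits that many seconds; `try_lock(path, timeout)` wraps a function, echoes Acquired/Failed/Released/Bypass messages, raises `SystemExit` on contention, and honors a `bypass_lock` keyword on the wrapped function. A minimal usage sketch under those assumptions; the lock-file path is hypothetical:

```python
import click
from utilities_common import flock

LOCK_FILE = "/tmp/example.lock"  # hypothetical path, for illustration

# Manual acquire/release: timeout=0 is one non-blocking attempt.
fd = open(LOCK_FILE, "w")
if flock.acquire_flock(fd.fileno(), 0):
    try:
        click.echo("doing exclusive work")
    finally:
        flock.release_flock(fd.fileno())

# Decorator form: exits with an error echo if the lock is already held,
# unless the caller passes bypass_lock=True to skip locking entirely.
@flock.try_lock(LOCK_FILE, timeout=0)
def reload_config(bypass_lock=False):
    click.echo("reloading config under lock")

reload_config()
```

The tests also check that the wrapped function's name is written into the lock file while it runs and scrubbed afterwards, which makes it possible to see who holds the lock from outside the process.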
+ mock.call(f"Acquired lock on {fd0.name}")] + assert b"dummy_f1" in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + assert b"dummy_f1" not in get_file_content(fd0) + + @mock.patch("click.echo") + def test_try_lock_with_bypass(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1) + + thrd = threading.Thread(target=f1_with_try_lock, args=(True,)) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f1" not in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + @mock.patch("click.echo") + def test_try_lock_with_bypass_default(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f2_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f2) + + thrd = threading.Thread(target=f2_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f2" not in get_file_content(fd0) + finally: + f2_exit.set() + thrd.join() + + def teardown(self): + print("TEARDOWN") + f0_exit.clear() + f1_exit.clear() + f2_exit.clear() diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index 4c9b33c3a4..7aad111f18 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -242,10 +242,11 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): running_config = copy.deepcopy(read_data["running_data"]) json_changes = copy.deepcopy(read_data["json_changes"]) + generic_config_updater.change_applier.ChangeApplier.updater_conf = None generic_config_updater.change_applier.UPDATER_CONF_FILE = CONF_FILE generic_config_updater.change_applier.set_verbose(True) generic_config_updater.services_validator.set_verbose(True) - + applier = generic_config_updater.change_applier.ChangeApplier() debug_print("invoked applier") @@ -254,7 +255,7 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): # Take copy for comparison start_running_config = copy.deepcopy(running_config) - + debug_print("main: json_change_index={}".format(json_change_index)) applier.apply(mock_obj()) @@ -297,4 +298,3 @@ def test_apply__calls_apply_change_to_config_db(self): # Assert applier.config_wrapper.apply_change_to_config_db.assert_has_calls([call(change)]) - diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index 96c25e3552..8480dc23b0 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ b/tests/generic_config_updater/generic_updater_test.py @@ -2,7 +2,7 @@ import os import shutil import unittest -from unittest.mock import MagicMock, Mock, call +from unittest.mock import MagicMock, Mock, call, patch from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.generic_updater as gu @@ -124,6 +124,8 @@ def __create_config_replacer(self, changes=None, verified_same_config=True): return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +@patch('generic_config_updater.generic_updater.get_config_json', MagicMock(return_value={})) class TestFileSystemConfigRollbacker(unittest.TestCase): def setUp(self): self.checkpoints_dir = 
os.path.join(os.getcwd(),"checkpoints") diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a2a776c0bb..21f50e0b7b 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -76,6 +76,28 @@ def test_ctor__default_values_set(self): self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + @patch('generic_config_updater.gu_common.subprocess.Popen') + def test_get_config_db_as_text(self, mock_popen): + config_wrapper = gu_common.ConfigWrapper() + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + + config_wrapper = gu_common.ConfigWrapper(scope="asic0") + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): # Arrange config_wrapper = self.config_wrapper_mock @@ -339,6 +361,13 @@ def test_validate_lanes__same_valid_lanes_multi_ports_no_spaces__failure(self): }} self.validate_lanes(config, '67') + def test_validate_lanes_default_value_duplicate_check(self): + config = {"PORT": { + "Ethernet0": {"lanes": "0", "speed": "10000"}, + "Ethernet1": {"lanes": "0", "speed": "10000"}, + }} + self.validate_lanes(config) + def validate_lanes(self, config_db, expected_error=None): # Arrange config_wrapper = gu_common.ConfigWrapper() diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index e8b277618f..0102cfff00 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -9,25 +9,124 @@ class TestMultiAsicChangeApplier(unittest.TestCase): - def test_extract_scope(self): + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_multiasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True test_paths_expectedresults = { - "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/sometable/data": (True, "", "/sometable/data"), - "": (False, "", ""), - "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", 
"/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + False, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), + "/asic77": ( + False, "", "" + ), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception: + assert(not result) + + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_singleasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = False + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + True, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), "/asic77": (False, "", ""), - "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + 
"/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), } for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): @@ -35,12 +134,12 @@ def test_extract_scope(self): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception as e: - assert(result == False) + except Exception: + assert(not result) @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db @@ -67,7 +166,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r } } - # Instantiate ChangeApplier with the default namespace + # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier() # Prepare a change object or data that applier.apply would use @@ -81,7 +180,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db @@ -108,8 +207,8 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run } } - # Instantiate ChangeApplier with the default namespace - applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + # Instantiate ChangeApplier with the default scope + applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") # Prepare a change object or data that applier.apply would use change = MagicMock() @@ -117,7 +216,7 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run # Call the apply method with the change object applier.apply(change) - # Assert ConfigDBConnector called with the correct namespace + # Assert ConfigDBConnector called with the correct scope mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @@ -129,9 +228,9 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con # Setup mock for json.load to return some running configuration mock_get_running_config.side_effect = Exception("Failed to get running config") - # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment - namespace = "asic0" - applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace) + # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment + scope = "asic0" + applier = generic_config_updater.change_applier.ChangeApplier(scope=scope) # Prepare a change object or data that applier.apply would use change = MagicMock() @@ -159,8 +258,8 @@ def 
test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, moc } } - # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment - applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment + applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables change = MagicMock() diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py index 4a55eb98be..5acdd391f0 100644 --- a/tests/generic_config_updater/multiasic_generic_updater_test.py +++ b/tests/generic_config_updater/multiasic_generic_updater_test.py @@ -19,7 +19,7 @@ class TestMultiAsicPatchApplier(unittest.TestCase): @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch') @patch('generic_config_updater.generic_updater.ChangeApplier') def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_patch, mock_get_config, mock_get_empty_tables): - namespace = "asic0" + scope = "asic0" patch_data = jsonpatch.JsonPatch([ { "op": "add", @@ -158,10 +158,10 @@ def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_ } } - patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace) + patch_applier = generic_config_updater.generic_updater.PatchApplier(scope=scope) # Apply the patch and verify patch_applier.apply(patch_data) # Assertions to ensure the namespace is correctly used in underlying calls - mock_ChangeApplier.assert_called_once_with(namespace=namespace) + mock_ChangeApplier.assert_called_once_with(scope=scope) diff --git a/tests/lldp_test.py b/tests/lldp_test.py index 89177338e0..1d6e55152c 100644 --- a/tests/lldp_test.py +++ b/tests/lldp_test.py @@ -2,6 +2,7 @@ from click.testing import CliRunner from utilities_common.general import load_module_from_source +from importlib import reload test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -83,6 +84,22 @@ def test_get_info(self): output = lldp.get_summary_output(lldp_detail_info=True) assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + def test_get_info_multi_asic(self): + from .mock_tables import mock_multi_asic + from .mock_tables import dbconnector + reload(mock_multi_asic) + dbconnector.load_namespace_config() + lldp = lldpshow.Lldpshow() + from .mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() + lldp.lldp_instance = [''] + lldp.lldpraw = expected_lldpctl_xml_output + lldp.get_info(lldp_detail_info=True, lldp_port='Ethernet0') + lldp.parse_info(lldp_detail_info=True) + output = lldp.get_summary_output(lldp_detail_info=True) + assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 8b867bdc96..da38af13dd 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -303,5 +303,28 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "222", "rate_limit_burst": "22222" + }, + "WRED_PROFILE|AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "ecn": "ecn_all", + 
"green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "Serverss0", + "port": "eth0" + }, + "QUEUE|Ethernet4|0": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "QUEUE|Ethernet4|1": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" } } diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 4f3f13c0ae..5ae87ea975 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -256,7 +256,12 @@ "FABRIC_PORT_TABLE|PORT0" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "79" + "REMOTE_PORT": "79", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "19.8", + "OLD_TX_DATA": "18490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT1" : { "STATUS": "down" @@ -264,7 +269,12 @@ "FABRIC_PORT_TABLE|PORT2" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "94" + "REMOTE_PORT": "94", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT3" : { "STATUS": "down" @@ -272,7 +282,12 @@ "FABRIC_PORT_TABLE|PORT4" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "85" + "REMOTE_PORT": "85", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT5" : { "STATUS": "down" @@ -280,12 +295,22 @@ "FABRIC_PORT_TABLE|PORT6" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "84" + "REMOTE_PORT": "84", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24170000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT7" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "93" + "REMOTE_PORT": "93", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24190000000", + "LAST_TIME": "1676672799" }, "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { "ip_address": "127.0.0.1", diff --git a/tests/mock_tables/asic1/asic_db.json b/tests/mock_tables/asic1/asic_db.json new file mode 100644 index 0000000000..1a769b82b5 --- /dev/null +++ b/tests/mock_tables/asic1/asic_db.json @@ -0,0 +1,6 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": { + "SAI_SWITCH_ATTR_INIT_SWITCH": "true", + "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "DE:AD:BE:EF:CA:FE" + } +} diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 56823ae113..1bcd812ef2 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -242,5 +242,25 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "555", "rate_limit_burst": "55555" + }, + "WRED_PROFILE|AZURE_LOSSY": { + "red_max_threshold":"32760", + "red_min_threshold":"4095", + "yellow_max_threshold":"32760", + "yellow_min_threshold":"4095", + "green_max_threshold": "32760", + "green_min_threshold": "4095", + "yellow_drop_probability": "2" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "Servers", + "port": "eth0" + }, + "QUEUE|Ethernet0|0": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "QUEUE|Ethernet0|1": { + "scheduler": "[SCHEDULAR|scheduler.0]" } } diff --git 
a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index c364d8599e..f919742157 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -207,6 +207,108 @@ "Ethernet-BP256": "oid:0x1000000000b06", "Ethernet-BP260": "oid:0x1000000000b08" }, + "COUNTERS_PG_NAME_MAP": { + "Ethernet-BP256:0": "oid:100000000b0f0", + "Ethernet-BP256:1": "oid:100000000b0f1", + "Ethernet-BP256:2": "oid:100000000b0f2", + "Ethernet-BP256:3": "oid:100000000b0f3", + "Ethernet-BP256:4": "oid:100000000b0f4", + "Ethernet-BP256:5": "oid:100000000b0f5", + "Ethernet-BP256:6": "oid:100000000b0f6", + "Ethernet-BP256:7": "oid:100000000b0f7", + "Ethernet-BP256:8": "oid:100000000b0f8", + "Ethernet-BP256:9": "oid:100000000b0f9", + "Ethernet-BP256:10": "oid:100000000b0fa", + "Ethernet-BP256:11": "oid:100000000b0fb", + "Ethernet-BP256:12": "oid:100000000b0fc", + "Ethernet-BP256:13": "oid:100000000b0fd", + "Ethernet-BP256:14": "oid:100000000b0fe", + "Ethernet-BP256:15": "oid:100000000b0ff", + "Ethernet-BP260:0": "oid:0x100000000b1f0", + "Ethernet-BP260:1": "oid:0x100000000b1f1", + "Ethernet-BP260:2": "oid:0x100000000b1f2", + "Ethernet-BP260:3": "oid:0x100000000b1f3", + "Ethernet-BP260:4": "oid:0x100000000b1f4", + "Ethernet-BP260:5": "oid:0x100000000b1f5", + "Ethernet-BP260:6": "oid:0x100000000b1f6", + "Ethernet-BP260:7": "oid:0x100000000b1f7", + "Ethernet-BP260:8": "oid:0x100000000b1f8", + "Ethernet-BP260:9": "oid:0x100000000b1f9", + "Ethernet-BP260:10": "oid:0x100000000b1fa", + "Ethernet-BP260:11": "oid:0x100000000b1fb", + "Ethernet-BP260:12": "oid:0x100000000b1fc", + "Ethernet-BP260:13": "oid:0x100000000b1fd", + "Ethernet-BP260:14": "oid:0x100000000b1fe", + "Ethernet-BP260:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000b06", + "oid:100000000b0f1": "oid:0x1000000000b06", + "oid:100000000b0f2": "oid:0x1000000000b06", + "oid:100000000b0f3": "oid:0x1000000000b06", + "oid:100000000b0f4": "oid:0x1000000000b06", + "oid:100000000b0f5": "oid:0x1000000000b06", + "oid:100000000b0f6": "oid:0x1000000000b06", + "oid:100000000b0f7": "oid:0x1000000000b06", + "oid:100000000b0f8": "oid:0x1000000000b06", + "oid:100000000b0f9": "oid:0x1000000000b06", + "oid:100000000b0fa": "oid:0x1000000000b06", + "oid:100000000b0fb": "oid:0x1000000000b06", + "oid:100000000b0fc": "oid:0x1000000000b06", + "oid:100000000b0fd": "oid:0x1000000000b06", + "oid:100000000b0fe": "oid:0x1000000000b06", + "oid:100000000b0ff": "oid:0x1000000000b06", + "oid:0x100000000b1f0": "oid:0x1000000000b08", + "oid:0x100000000b1f1": "oid:0x1000000000b08", + "oid:0x100000000b1f2": "oid:0x1000000000b08", + "oid:0x100000000b1f3": "oid:0x1000000000b08", + "oid:0x100000000b1f4": "oid:0x1000000000b08", + "oid:0x100000000b1f5": "oid:0x1000000000b08", + "oid:0x100000000b1f6": "oid:0x1000000000b08", + "oid:0x100000000b1f7": "oid:0x1000000000b08", + "oid:0x100000000b1f8": "oid:0x1000000000b08", + "oid:0x100000000b1f9": "oid:0x1000000000b08", + "oid:0x100000000b1fa": "oid:0x1000000000b08", + "oid:0x100000000b1fb": "oid:0x1000000000b08", + "oid:0x100000000b1fc": "oid:0x1000000000b08", + "oid:0x100000000b1fd": "oid:0x1000000000b08", + "oid:0x100000000b1fe": "oid:0x1000000000b08", + "oid:0x100000000b1ff" : "oid:0x1000000000b08" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + 
"oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 5178c49ca0..6af9e19da4 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -4,6 +4,9 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" } } \ No newline at end of file diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index af37538447..108fa7593d 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -848,6 +848,8 @@ "FEATURE|lldp": { "state": "enabled", "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", "high_mem_alert": "disabled", "set_owner": "kube" }, diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index d62c34cb3c..2f16c7014d 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -402,145 +402,169 @@ "SAI_QUEUE_STAT_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", - "SAI_QUEUE_STAT_PACKETS": "0" + "SAI_QUEUE_STAT_PACKETS": "0", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "0" }, "COUNTERS:oid:0x15000000000658": { "SAI_QUEUE_STAT_BYTES": "43", "SAI_QUEUE_STAT_DROPPED_BYTES": "1", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "60" + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "1" }, "COUNTERS:oid:0x15000000000659": { "SAI_QUEUE_STAT_BYTES": "7", "SAI_QUEUE_STAT_DROPPED_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "82" + "SAI_QUEUE_STAT_PACKETS": "82", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000065a": { "SAI_QUEUE_STAT_BYTES": "59", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "11" + "SAI_QUEUE_STAT_PACKETS": "11", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "12" }, "COUNTERS:oid:0x1500000000065b": { "SAI_QUEUE_STAT_BYTES": "62", "SAI_QUEUE_STAT_DROPPED_BYTES": "40", "SAI_QUEUE_STAT_DROPPED_PACKETS": "35", - "SAI_QUEUE_STAT_PACKETS": "36" + "SAI_QUEUE_STAT_PACKETS": "36", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "11" }, "COUNTERS:oid:0x1500000000065c": { "SAI_QUEUE_STAT_BYTES": "91", "SAI_QUEUE_STAT_DROPPED_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_PACKETS": "2", - "SAI_QUEUE_STAT_PACKETS": "49" + "SAI_QUEUE_STAT_PACKETS": "49", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "15" }, "COUNTERS:oid:0x1500000000065d": { "SAI_QUEUE_STAT_BYTES": "17", "SAI_QUEUE_STAT_DROPPED_BYTES": 
"74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "94", - "SAI_QUEUE_STAT_PACKETS": "33" + "SAI_QUEUE_STAT_PACKETS": "33", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "17" }, "COUNTERS:oid:0x1500000000065e": { "SAI_QUEUE_STAT_BYTES": "71", "SAI_QUEUE_STAT_DROPPED_BYTES": "33", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "40" + "SAI_QUEUE_STAT_PACKETS": "40", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "73" }, "COUNTERS:oid:0x15000000000667": { "SAI_QUEUE_STAT_BYTES": "8", "SAI_QUEUE_STAT_DROPPED_BYTES": "78", "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", - "SAI_QUEUE_STAT_PACKETS": "54" + "SAI_QUEUE_STAT_PACKETS": "54", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "29" }, "COUNTERS:oid:0x15000000000668": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "9", "SAI_QUEUE_STAT_DROPPED_PACKETS": "74", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "42" }, "COUNTERS:oid:0x15000000000669": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", - "SAI_QUEUE_STAT_PACKETS": "15" + "SAI_QUEUE_STAT_PACKETS": "15", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "54" }, "COUNTERS:oid:0x1500000000066a": { "SAI_QUEUE_STAT_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "45" + "SAI_QUEUE_STAT_PACKETS": "45", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000066b": { "SAI_QUEUE_STAT_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_PACKETS": "89", - "SAI_QUEUE_STAT_PACKETS": "55" + "SAI_QUEUE_STAT_PACKETS": "55", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "28" }, "COUNTERS:oid:0x1500000000066c": { "SAI_QUEUE_STAT_BYTES": "70", "SAI_QUEUE_STAT_DROPPED_BYTES": "79", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "14" + "SAI_QUEUE_STAT_PACKETS": "14", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "13" }, "COUNTERS:oid:0x1500000000066d": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "81", "SAI_QUEUE_STAT_DROPPED_PACKETS": "66", - "SAI_QUEUE_STAT_PACKETS": "68" + "SAI_QUEUE_STAT_PACKETS": "68", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "22" }, "COUNTERS:oid:0x1500000000066e": { "SAI_QUEUE_STAT_BYTES": "4", "SAI_QUEUE_STAT_DROPPED_BYTES": "76", "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", - "SAI_QUEUE_STAT_PACKETS": "63" + "SAI_QUEUE_STAT_PACKETS": "63", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "53" }, "COUNTERS:oid:0x15000000000677": { "SAI_QUEUE_STAT_BYTES": "73", "SAI_QUEUE_STAT_DROPPED_BYTES": "74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "77", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "67" }, "COUNTERS:oid:0x15000000000678": { "SAI_QUEUE_STAT_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_BYTES": "54", "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", - "SAI_QUEUE_STAT_PACKETS": "60" + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "79" }, "COUNTERS:oid:0x15000000000679": { "SAI_QUEUE_STAT_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_BYTES": "39", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "57" + "SAI_QUEUE_STAT_PACKETS": "57", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "35" }, "COUNTERS:oid:0x1500000000067a": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "98", "SAI_QUEUE_STAT_DROPPED_PACKETS": "70", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + 
"SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "47" }, "COUNTERS:oid:0x1500000000067b": { "SAI_QUEUE_STAT_BYTES": "49", "SAI_QUEUE_STAT_DROPPED_BYTES": "36", "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", - "SAI_QUEUE_STAT_PACKETS": "18" + "SAI_QUEUE_STAT_PACKETS": "18", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "62" }, "COUNTERS:oid:0x1500000000067c": { "SAI_QUEUE_STAT_BYTES": "90", "SAI_QUEUE_STAT_DROPPED_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_PACKETS": "3", - "SAI_QUEUE_STAT_PACKETS": "99" + "SAI_QUEUE_STAT_PACKETS": "99", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000067d": { "SAI_QUEUE_STAT_BYTES": "84", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "8" + "SAI_QUEUE_STAT_PACKETS": "8", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "33" }, "COUNTERS:oid:0x1500000000067e": { "SAI_QUEUE_STAT_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_BYTES": "92", "SAI_QUEUE_STAT_DROPPED_PACKETS": "75", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "3" }, "COUNTERS:oid:0x60000000005a3": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", @@ -982,7 +1006,8 @@ }, "COUNTERS:oid:0x21000000000000": { "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE": "1000", - "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0" + "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0", + "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "500" }, "COUNTERS:oid:0x1a00000000034f": { @@ -1772,7 +1797,8 @@ }, "COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP": { "DEBUG_1": "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE", - "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" + "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS", + "SWITCH_STD_DROP_COUNTER-SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP" }, "COUNTERS:oid:0x1500000000035a": { "PFC_WD_ACTION": "drop", diff --git a/tests/multi_asic_dropstat_test.py b/tests/multi_asic_dropstat_test.py new file mode 100644 index 0000000000..8b9dd72826 --- /dev/null +++ b/tests/multi_asic_dropstat_test.py @@ -0,0 +1,122 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_masic_result_asic0 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 10 100 0 0 80 20 + Ethernet4 U 0 1000 0 0 800 100 +Ethernet-BP0 U 0 1000 0 0 800 100 +Ethernet-BP4 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_asic1 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 10 100 0 0 80 20 +Ethernet-BP260 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 0 0 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 +Ethernet-BP0 U 0 0 0 0 0 0 +Ethernet-BP4 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- 
+sonic_drops_test 0 + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 0 0 0 0 0 0 +Ethernet-BP260 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_dropcount_masic_asic0(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic0' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 and return_code == 0 + + def test_show_dropcount_masic_all_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 + dropstat_masic_result_asic1 + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_clear_all and return_code == 0 + + def test_show_dropcount_masic_invalid_ns(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 2 + assert "invalid choice: asic5" in result + + def test_show_dropcount_version(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '--version' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") + print("TEARDOWN") diff --git a/tests/multi_asic_ecnconfig_test.py b/tests/multi_asic_ecnconfig_test.py new file mode 100644 index 0000000000..034a517ace --- /dev/null +++ b/tests/multi_asic_ecnconfig_test.py @@ -0,0 +1,64 @@ +import os +import sys +from .ecn_test import TestEcnConfigBase +from .ecn_input.ecn_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestEcnConfigMultiAsic(TestEcnConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_ecn_show_config_all_masic(self): + self.executor(testData['ecn_show_config_masic']) + + def 
test_ecn_show_config_all_verbose_masic(self): + self.executor(testData['test_ecn_show_config_verbose_masic']) + + def test_ecn_show_config_one_masic(self): + self.executor(testData['test_ecn_show_config_namespace']) + + def test_ecn_show_config_one_verbose_masic(self): + self.executor(testData['test_ecn_show_config_namespace_verbose']) + + def test_ecn_config_change_other_threshold_masic(self): + self.executor(testData['ecn_cfg_threshold_masic']) + + def test_ecn_config_change_other_prob_masic(self): + self.executor(testData['ecn_cfg_probability_masic']) + + def test_ecn_config_change_gdrop_verbose_all_masic(self): + self.executor(testData['ecn_cfg_gdrop_verbose_all_masic']) + + def test_ecn_config_multi_set_verbose_all_masic(self): + self.executor(testData['ecn_cfg_multi_set_verbose_all_masic']) + + def test_ecn_queue_get_masic(self): + self.executor(testData['ecn_q_get_masic']) + + def test_ecn_queue_get_verbose_masic(self): + self.executor(testData['ecn_q_get_verbose_masic']) + + def test_ecn_queue_get_all_masic(self): + self.executor(testData['ecn_q_get_all_ns_masic']) + + def test_ecn_queue_get_all_verbose_masic(self): + self.executor(testData['ecn_q_get_all_ns_verbose_masic']) + + def test_ecn_q_set_off_all_masic(self): + self.executor(testData['ecn_cfg_q_all_ns_off_masic']) + + def test_ecn_q_set_off_one_masic(self): + self.executor(testData['ecn_cfg_q_one_ns_off_verbose_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py new file mode 100644 index 0000000000..94bb13011b --- /dev/null +++ b/tests/multi_asic_pgdropstat_test.py @@ -0,0 +1,95 @@ +import os +import sys +from utilities_common.cli import UserCache +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +pg_drop_masic_one_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + +pg_drop_masic_all_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ + Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + + +class TestMultiAsicPgDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_pg_drop_masic_all(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show' + ]) + print("return_code: 
{}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_all_result + + def test_show_pg_drop_masic(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic1' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_one_result + + def test_show_pg_drop_masic_not_exist(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 1 + assert result == "Input value for '--namespace' / '-n'. Choose from one of (asic0, asic1)" + + def test_clear_pg_drop(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == "Cleared PG drop counter\n" + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + UserCache('pg-drop').remove_all() + print("TEARDOWN") diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 7a701ba4bc..5538f3aada 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -78,6 +78,14 @@ """ +show_pbh_statistics_partial = """\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- diff --git a/tests/pbh_input/counters_db_partial.json b/tests/pbh_input/counters_db_partial.json new file mode 100644 index 0000000000..aa140188c8 --- /dev/null +++ b/tests/pbh_input/counters_db_partial.json @@ -0,0 +1,11 @@ +{ + "COUNTERS:oid:0x9000000000000": { }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index 7dddfea9ca..0d68f458ee 100644 --- a/tests/pbh_test.py +++ b/tests/pbh_test.py @@ -946,6 +946,34 @@ def test_show_pbh_statistics_after_clear(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_zero + def test_show_pbh_statistics_after_clear_and_counters_partial(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_partial') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_partial def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') diff --git a/tests/pfc_input/assert_show_output.py b/tests/pfc_input/assert_show_output.py new file mode 100644 index 0000000000..2406f8b49f --- /dev/null +++ b/tests/pfc_input/assert_show_output.py @@ -0,0 +1,82 @@ +pfc_asym_cannot_find_intf = """\ + +Interface Asymmetric +----------- ------------ + +""" + +pfc_cannot_find_intf = """\ +Cannot find interface Ethernet1234 +""" + +pfc_show_asymmetric_all = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off +Ethernet4 off +Ethernet8 off +Ethernet12 off +Ethernet16 off +Ethernet20 off +Ethernet24 off +Ethernet28 off +Ethernet32 off +Ethernet36 off +Ethernet40 off +Ethernet44 off +Ethernet48 off +Ethernet52 off +Ethernet56 off +Ethernet60 off +Ethernet64 off +Ethernet68 off +Ethernet72 off +Ethernet76 off +Ethernet80 off +Ethernet84 off +Ethernet88 off +Ethernet92 off +Ethernet96 off +Ethernet100 off +Ethernet104 off +Ethernet108 off +Ethernet112 off +Ethernet116 off +Ethernet120 off +Ethernet124 off + +""" + +pfc_show_asymmetric_intf = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +pfc_show_priority_all = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 + +""" + +pfc_show_priority_intf = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +pfc_config_priority_on = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4,5 + +""" diff --git a/tests/pfc_test.py b/tests/pfc_test.py new file mode 100644 index 0000000000..101aa476cc --- /dev/null +++ b/tests/pfc_test.py @@ -0,0 +1,81 @@ +import os +import sys +import pfc.main as pfc +from .pfc_input.assert_show_output import pfc_cannot_find_intf, pfc_show_asymmetric_all, \ + pfc_show_asymmetric_intf, pfc_show_priority_all, pfc_show_priority_intf, \ + pfc_config_priority_on, pfc_asym_cannot_find_intf +from utilities_common.db import Db + +from click.testing import CliRunner +from importlib import reload + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestPfcBase(object): + + def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entry=None, + runner=CliRunner()): + db = Db() + result = runner.invoke(cliobj, command, obj=db) + print(result.exit_code) + print(result.output) + + if result.exit_code != expected_rc: + print(result.exception) + assert result.exit_code == expected_rc + + if expected_output: + assert result.output == expected_output + + if expected_cfgdb_entry: + (table, key, field, expected_val) = expected_cfgdb_entry + configdb = db.cfgdb + entry = configdb.get_entry(table, key) + assert entry.get(field) == expected_val + + +class TestPfc(TestPfcBase): + + @classmethod + def setup_class(cls): + from mock_tables import dbconnector + from mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() + + def test_pfc_show_asymmetric_all(self): + self.executor(pfc.cli, ['show', 'asymmetric'], + 
expected_output=pfc_show_asymmetric_all) + + def test_pfc_show_asymmetric_intf(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet0'], + expected_output=pfc_show_asymmetric_intf) + + def test_pfc_show_asymmetric_intf_fake(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet1234'], + expected_output=pfc_asym_cannot_find_intf) + + def test_pfc_show_priority_all(self): + self.executor(pfc.cli, ['show', 'priority'], + expected_output=pfc_show_priority_all) + + def test_pfc_show_priority_intf(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet0'], + expected_output=pfc_show_priority_intf) + + def test_pfc_show_priority_intf_fake(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet1234'], + expected_output=pfc_cannot_find_intf) + + def test_pfc_config_asymmetric(self): + self.executor(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], + expected_cfgdb_entry=('PORT', 'Ethernet0', 'pfc_asym', 'on')) + + def test_pfc_config_priority(self): + self.executor(pfc.cli, ['config', 'priority', 'on', 'Ethernet0', '5'], + expected_output=pfc_config_priority_on) diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 20b9516fbc..391d004872 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -1851,136 +1851,136 @@ show_queue_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 + Port Voq 
Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ show_queue_voq_counters_nz = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ show_queue_voq_counters_with_clear = ["""\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 0 0 0 0 -testsw|Ethernet0 VOQ2 0 0 0 0 -testsw|Ethernet0 VOQ3 0 0 0 0 -testsw|Ethernet0 VOQ4 0 0 0 0 -testsw|Ethernet0 VOQ5 0 0 0 0 -testsw|Ethernet0 VOQ6 0 0 0 0 -testsw|Ethernet0 VOQ7 0 0 0 
0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 0 0 0 0 0 +testsw|Ethernet0 VOQ2 0 0 0 0 0 +testsw|Ethernet0 VOQ3 0 0 0 0 0 +testsw|Ethernet0 VOQ4 0 0 0 0 0 +testsw|Ethernet0 VOQ5 0 0 0 0 0 +testsw|Ethernet0 VOQ6 0 0 0 0 0 +testsw|Ethernet0 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 0 0 0 0 -testsw|Ethernet4 VOQ1 0 0 0 0 -testsw|Ethernet4 VOQ2 0 0 0 0 -testsw|Ethernet4 VOQ3 0 0 0 0 -testsw|Ethernet4 VOQ4 0 0 0 0 -testsw|Ethernet4 VOQ5 0 0 0 0 -testsw|Ethernet4 VOQ6 0 0 0 0 -testsw|Ethernet4 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 0 0 0 0 0 +testsw|Ethernet4 VOQ1 0 0 0 0 0 +testsw|Ethernet4 VOQ2 0 0 0 0 0 +testsw|Ethernet4 VOQ3 0 0 0 0 0 +testsw|Ethernet4 VOQ4 0 0 0 0 0 +testsw|Ethernet4 VOQ5 0 0 0 0 0 +testsw|Ethernet4 VOQ6 0 0 0 0 0 +testsw|Ethernet4 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 0 0 0 0 -testsw|Ethernet8 VOQ1 0 0 0 0 -testsw|Ethernet8 VOQ2 0 0 0 0 -testsw|Ethernet8 VOQ3 0 0 0 0 -testsw|Ethernet8 VOQ4 0 0 0 0 -testsw|Ethernet8 VOQ5 0 0 0 0 -testsw|Ethernet8 VOQ6 0 0 0 0 -testsw|Ethernet8 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 0 0 0 0 0 +testsw|Ethernet8 VOQ1 0 0 0 0 0 +testsw|Ethernet8 VOQ2 0 0 0 0 0 +testsw|Ethernet8 VOQ3 0 0 0 0 0 +testsw|Ethernet8 VOQ4 0 0 0 0 0 +testsw|Ethernet8 VOQ5 0 0 0 0 0 +testsw|Ethernet8 VOQ6 0 0 0 0 0 +testsw|Ethernet8 VOQ7 0 0 0 0 0 """ ] show_queue_port_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ show_queue_port_voq_counters_nz = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts 
+---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ @@ -1988,48 +1988,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": "39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", @@ -2038,48 +2046,56 @@ }, "testsw|Ethernet4": { "VOQ0": { + "creditWDPkts": "29", "dropbytes": "78", "droppacket": "93", "totalbytes": "8", "totalpacket": "54" }, "VOQ1": { + "creditWDPkts": "42", "dropbytes": "9", "droppacket": "74", "totalbytes": "96", "totalpacket": "83" }, "VOQ2": { + "creditWDPkts": "54", "dropbytes": "31", "droppacket": "61", "totalbytes": "60", "totalpacket": "15" }, "VOQ3": { + "creditWDPkts": "19", "dropbytes": "94", "droppacket": "82", "totalbytes": "52", "totalpacket": "45" }, "VOQ4": { + "creditWDPkts": "28", "dropbytes": "52", "droppacket": "89", "totalbytes": "88", "totalpacket": "55" }, "VOQ5": { + "creditWDPkts": "13", "dropbytes": "79", "droppacket": "95", "totalbytes": "70", "totalpacket": "14" }, "VOQ6": { + "creditWDPkts": "22", "dropbytes": "81", "droppacket": "66", "totalbytes": "60", "totalpacket": "68" }, "VOQ7": { + "creditWDPkts": "53", "dropbytes": "76", "droppacket": "48", "totalbytes": "4", @@ -2088,48 +2104,56 @@ }, "testsw|Ethernet8": { "VOQ0": { + "creditWDPkts": "67", "dropbytes": "74", "droppacket": "77", "totalbytes": "73", "totalpacket": "41" }, "VOQ1": { + "creditWDPkts": "79", "dropbytes": "54", "droppacket": "56", "totalbytes": "21", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "35", "dropbytes": "39", "droppacket": "12", "totalbytes": "31", "totalpacket": "57" }, "VOQ3": { + "creditWDPkts": "47", "dropbytes": "98", "droppacket": "70", "totalbytes": "96", "totalpacket": "41" }, "VOQ4": { + "creditWDPkts": "62", "dropbytes": "36", "droppacket": "63", "totalbytes": "49", "totalpacket": "18" }, "VOQ5": { + "creditWDPkts": "19", "dropbytes": "15", "droppacket": "3", "totalbytes": "90", "totalpacket": "99" }, "VOQ6": { + "creditWDPkts": "33", "dropbytes": "94", "droppacket": "82", "totalbytes": "84", "totalpacket": "8" }, "VOQ7": { + "creditWDPkts": "3", "dropbytes": "92", "droppacket": "75", "totalbytes": "15", @@ -2142,48 +2166,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": 
"39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index d9fd672102..57a220be1e 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -11,10 +11,11 @@ import select import socket import termios +import getpass -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world -======== LINE-CARD2 output: ======== +======== LINE-CARD2|sonic-lc3 output: ======== hello world ''' REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... @@ -75,17 +76,27 @@ def mock_paramiko_connection(channel): return conn +def mock_getpass(prompt="Password:", stream=None): + return "dummy" + + class TestRemoteExec(object): + __getpass = getpass.getpass + @classmethod def setup_class(cls): print("SETUP") from .mock_tables import dbconnector dbconnector.load_database_config() + getpass.getpass = mock_getpass + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + getpass.getpass = TestRemoteExec.__getpass @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - # @mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_module_name(self): @@ -98,7 +109,6 @@ def test_rexec_with_module_name(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_hostname(self): @@ -111,7 +121,6 @@ def test_rexec_with_hostname(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_error_cmd())) def test_rexec_error_with_module_name(self): @@ -133,7 +142,6 @@ def test_rexec_error(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) 
@mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_all(self): @@ -147,21 +155,19 @@ def test_rexec_all(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" + LINECARD_NAME = "sonic-lc-100" result = runner.invoke( rexec.cli, [LINECARD_NAME, "-c", "show version"]) print(result.output) assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output + assert "Linecard sonic-lc-100 not found\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_unreachable_lc(self): @@ -175,7 +181,6 @@ def test_rexec_unreachable_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_help(self): @@ -188,7 +193,6 @@ def test_rexec_help(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) @@ -202,7 +206,6 @@ def test_rexec_exception(self): assert "Failed to connect to sonic-lc1 with username admin\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) def test_rexec_with_user_param(self): @@ -214,6 +217,19 @@ def test_rexec_with_user_param(self): assert result.exit_code == 1, result.output assert "Failed to connect to sonic-lc1 with username testuser\n" == result.output + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + def test_rexec_without_password_input(self): + runner = CliRunner() + getpass.getpass = TestRemoteExec.__getpass + LINECARD_NAME = "all" + result = runner.invoke( + rexec.cli, [LINECARD_NAME, "-c", "show version"]) + getpass.getpass = mock_getpass + print(result.output) + assert result.exit_code == 1, result.output 
+ assert "Aborted" in result.output + class TestRemoteCLI(object): @classmethod diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py new file mode 100644 index 0000000000..e1be3d0302 --- /dev/null +++ b/tests/remote_show_test.py @@ -0,0 +1,73 @@ +import mock +import subprocess +from io import BytesIO +from click.testing import CliRunner + + +def mock_rexec_command(*args): + mock_stdout = BytesIO(b"""hello world""") + print(mock_stdout.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=0, stdout=mock_stdout, stderr=BytesIO()) + + +def mock_rexec_error_cmd(*args): + mock_stderr = BytesIO(b"""Error""") + print(mock_stderr.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=1, stdout=BytesIO(), stderr=mock_stderr) + + +MULTI_LC_REXEC_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +hello world +''' + +MULTI_LC_ERR_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +Error +''' + + +class TestRexecBgp(object): + @classmethod + def setup_class(cls): + pass + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_error_cmd + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 1 + assert MULTI_LC_ERR_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "network", "10.0.0.0/24"]) + def test_show_ip_bgp_network_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["network", "10.0.0.0/24"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 1f92b3d19a..26c632d742 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,8 +252,11 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + if 'show runningconfiguration bgp' in ' '.join(args[0]): + return 'bgp suppress-fib-pending' + else: + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = 
ct_data.get(RET, 0) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 523848ec45..0e58daa18e 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -20,6 +20,46 @@ ERROR_NOT_IMPLEMENTED = 5 ERROR_INVALID_PORT = 6 +FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT = { + 'type': 'QSFP28 or later', + 'type_abbrv_name': 'QSFP28', + 'manufacturer': 'Mellanox', + 'model': 'MCP1600-C003', + 'vendor_rev': 'A2', + 'serial': 'MT1636VS10561', + 'vendor_oui': '00-02-c9', + 'vendor_date': '2016-07-18', + 'connector': 'No separable connector', + 'encoding': '64B66B', + 'ext_identifier': 'Power Class 1(1.5W max)', + 'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1', + 'cable_type': 'Length Cable Assembly(m)', + 'cable_length': '3', + 'application_advertisement': 'N/A', + 'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}", + 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no',\ + 'Voltage_support': 'no', 'Temp_support': 'no'}", + 'nominal_bit_rate': '255' +} +FLAT_MEMORY_MODULE_EEPROM = """Ethernet16: SFP EEPROM detected + Application Advertisement: N/A + Connector: No separable connector + Encoding: 64B66B + Extended Identifier: Power Class 1(1.5W max) + Extended RateSelect Compliance: QSFP+ Rate Select Version 1 + Identifier: QSFP28 or later + Length Cable Assembly(m): 3 + Nominal Bit Rate(100Mbs): 255 + Specification compliance: + 10/40G Ethernet Compliance Code: 40GBASE-CR4 + Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18 + Vendor Name: Mellanox + Vendor OUI: 00-02-c9 + Vendor PN: MCP1600-C003 + Vendor Rev: A2 + Vendor SN: MT1636VS10561 +""" + class TestSfputil(object): def test_format_dict_value_to_string(self): sorted_key_table = [ @@ -570,6 +610,51 @@ def test_show_lpmode(self, mock_chassis): """ assert result.output == expected_output + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=True)) + def test_power_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Power disable/enable is not available for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + def test_power(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.exit_code == 0 + + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n\n' + + mock_sfp.get_presence.return_value = True + mock_sfp.set_power = MagicMock(side_effect=NotImplementedError) + runner = 
CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'This functionality is currently not implemented for this platform\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.set_power = MagicMock(return_value=False) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Failed\n' + + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) @@ -585,6 +670,39 @@ def test_show_eeprom_RJ45(self, mock_chassis): expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" assert result.output == expected_output + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @pytest.mark.parametrize("exception, xcvr_api_none, expected_output", [ + (None, False, '''DOM values not supported for flat memory module\n\n'''), + (NotImplementedError, False, '''API is currently not implemented for this platform\n\n'''), + (None, True, '''API is none while getting DOM info!\n\n''') + ]) + @patch('sfputil.main.platform_chassis') + def test_show_eeprom_dom_conditions(self, mock_chassis, exception, xcvr_api_none, expected_output): + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_sfp.get_transceiver_info.return_value = FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT + mock_chassis.get_sfp.return_value = mock_sfp + + if exception: + mock_chassis.get_sfp().get_xcvr_api.side_effect = exception + elif xcvr_api_none: + mock_chassis.get_sfp().get_xcvr_api.return_value = None + else: + mock_api = MagicMock() + mock_chassis.get_sfp().get_xcvr_api.return_value = mock_api + + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + + if exception or xcvr_api_none: + assert result.exit_code == ERROR_NOT_IMPLEMENTED + else: + assert result.exit_code == 0 + assert result.output == FLAT_MEMORY_MODULE_EEPROM + expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=0))) def test_show_eeprom_hexdump_invalid_port(self, mock_chassis): @@ -1510,3 +1628,46 @@ def test_load_port_config(self, mock_is_multi_asic): mock_is_multi_asic.return_value = False assert sfputil.load_port_config() == True + + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + def test_debug_loopback(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + + runner = CliRunner() + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + 
assert result.output == 'Ethernet0: SFP EEPROM not detected\n' + mock_sfp.get_presence.return_value = True + + mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: This functionality is not implemented\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: Set host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set none loopback failed\n' + assert result.exit_code == EXIT_FAIL + + mock_api.set_loopback_mode.return_value = True + mock_api.set_loopback_mode.side_effect = AttributeError + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index f610199538..bfc23d8912 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -57,7 +57,8 @@ def setup_class(cls): ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'), ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'), ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'), - ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')], + ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error'), + ('bgp_v4_network', 'bgp_v4_network_all_asic_on_single_asic')], indirect=['setup_single_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_single_bgp_instance): @@ -77,14 +78,16 @@ def setup_class(cls): @pytest.mark.parametrize( 'setup_multi_asic_bgp_instance, test_vector', - [('bgp_v4_network', 'bgp_v4_network_multi_asic'), + [('bgp_v4_network_all_asic', 'bgp_v4_network_default_multi_asic'), ('bgp_v6_network', 'bgp_v6_network_multi_asic'), ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'), ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'), ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'), ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'), ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'), - ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')], + ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0'), + ('bgp_v4_network_all_asic', 'bgp_v4_network_all_asic'), + ('bgp_v4_network', 'bgp_v4_network_asic_unknown')], indirect=['setup_multi_asic_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_multi_asic_bgp_instance): diff --git a/tests/single_asic_dropstat_test.py b/tests/single_asic_dropstat_test.py new file mode 100644 index 0000000000..c521bcfa60 --- /dev/null +++ b/tests/single_asic_dropstat_test.py @@ -0,0 +1,72 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + 
+dropstat_result = """\
+    IFACE    STATE    RX_ERR    RX_DROPS    TX_ERR    TX_DROPS    DEBUG_0    DEBUG_2
+---------  -------  --------  ----------  --------  ----------  ---------  ---------
+Ethernet0        D        10         100         0           0         80         20
+Ethernet4      N/A         0        1000         0           0        800        100
+Ethernet8      N/A       100          10         0           0         10          0
+
+          DEVICE    SWITCH_DROPS    lowercase_counter
+----------------  --------------  -------------------
+sonic_drops_test            1000                    0
+"""
+
+dropstat_result_clear_all = """\
+    IFACE    STATE    RX_ERR    RX_DROPS    TX_ERR    TX_DROPS    DEBUG_0    DEBUG_2
+---------  -------  --------  ----------  --------  ----------  ---------  ---------
+Ethernet0        D         0           0         0           0          0          0
+Ethernet4      N/A         0           0         0           0          0          0
+Ethernet8      N/A         0           0         0           0          0          0
+
+          DEVICE    SWITCH_DROPS    lowercase_counter
+----------------  --------------  -------------------
+sonic_drops_test               0                    0
+"""
+
+
+class TestSingleAsicDropstat(object):
+    @classmethod
+    def setup_class(cls):
+        os.environ["PATH"] += os.pathsep + scripts_path
+        os.environ["UTILITIES_UNIT_TESTING"] = "1"
+        print("SETUP")
+
+    def test_show_dropcount_and_clear(self):
+        os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1"
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'show'
+        ])
+        os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE")
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == dropstat_result
+        assert return_code == 0
+
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'clear'
+        ])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == 'Cleared drop counters\n' and return_code == 0
+
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'show'
+        ])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == dropstat_result_clear_all and return_code == 0
+
+    @classmethod
+    def teardown_class(cls):
+        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ.pop("UTILITIES_UNIT_TESTING")
+        print("TEARDOWN")
diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py
index ccfc2f4929..98db887941 100644
--- a/tests/sonic_package_manager/conftest.py
+++ b/tests/sonic_package_manager/conftest.py
@@ -16,6 +16,7 @@
 from sonic_package_manager.registry import RegistryResolver
 from sonic_package_manager.version import Version
 from sonic_package_manager.service_creator.creator import *
+from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION
 
 
 @pytest.fixture
@@ -405,6 +406,7 @@ def fake_db_for_migration(fake_metadata_resolver):
 def sonic_fs(fs):
     fs.create_file('/proc/1/root')
     fs.create_dir(ETC_SONIC_PATH)
+    fs.create_dir(ETC_SYSTEMD_LOCATION)
     fs.create_dir(SYSTEMD_LOCATION)
     fs.create_dir(DOCKER_CTL_SCRIPT_LOCATION)
     fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION)
diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py
index 8e6edcd0f0..8278a8da2b 100644
--- a/tests/sonic_package_manager/test_service_creator.py
+++ b/tests/sonic_package_manager/test_service_creator.py
@@ -12,6 +12,7 @@
 from sonic_package_manager.metadata import Metadata
 from sonic_package_manager.package import Package
 from sonic_package_manager.service_creator.creator import *
+from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION
 
 from sonic_package_manager.service_creator.feature import FeatureRegistry
 
@@ -106,6 +107,14 @@ def test_service_creator(sonic_fs, manifest, service_creator, package_manager):
     assert sonic_fs.exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, 'test.sh'))
     assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.service'))
 
+    # Create the symlinks and directories that featured creates
+    os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test.service'))
+    os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service'))
+    os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service'))
+    os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d'))
+    os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d'))
+    os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d'))
+
     def read_file(name):
         with open(os.path.join(ETC_SONIC_PATH, name)) as file:
             return file.read()
@@ -118,6 +127,15 @@ def read_file(name):
     assert generated_services_conf_content.endswith('\n')
     assert set(generated_services_conf_content.split()) == set(['test.service', 'test@.service'])
 
+    service_creator.remove(package)
+
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service'))
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service'))
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service'))
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d'))
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d'))
+    assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d'))
+
 
 def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator):
     entry = PackageEntry('test', 'azure/sonic-test')
diff --git a/tests/ssdutil_test.py b/tests/ssdutil_test.py
new file mode 100644
index 0000000000..bd57b0cbe7
--- /dev/null
+++ b/tests/ssdutil_test.py
@@ -0,0 +1,42 @@
+import sys
+import argparse
+from unittest.mock import patch, MagicMock
+import sonic_platform_base  # noqa: F401
+
+sys.modules['sonic_platform'] = MagicMock()
+sys.modules['sonic_platform_base.sonic_ssd.ssd_generic'] = MagicMock()
+
+import ssdutil.main as ssdutil  # noqa: E402
+
+
+class Ssd():
+
+    def get_model(self):
+        return 'SkyNet'
+
+    def get_firmware(self):
+        return 'ABC'
+
+    def get_serial(self):
+        return 'T1000'
+
+    def get_health(self):
+        return 5
+
+    def get_temperature(self):
+        return 3000
+
+    def get_vendor_output(self):
+        return 'SONiC Test'
+
+
+class TestSsdutil:
+
+    @patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', MagicMock(return_value=("test_path", "")))  # noqa: E501
+    @patch('os.geteuid', MagicMock(return_value=0))
+    def test_sonic_storage_path(self):
+
+        with patch('argparse.ArgumentParser.parse_args', MagicMock()) as mock_args:  # noqa: E501
+            sys.modules['sonic_platform_base.sonic_storage.ssd'] = MagicMock(return_value=Ssd())  # noqa: E501
+            mock_args.return_value = argparse.Namespace(device='/dev/sda', verbose=True, vendor=True)  # noqa: E501
+            ssdutil.ssdutil()
diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py
deleted file mode 100644
index 04064d306e..0000000000
--- a/tests/suppress_pending_fib_test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from click.testing import CliRunner
-
-import config.main as config
-import show.main as show
-from utilities_common.db import Db
-
-
-class TestSuppressFibPending:
-    def test_synchronous_mode(self):
-        runner = CliRunner()
-
-        db = Db()
-
-        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db)
-        print(result.output)
-        assert result.exit_code == 0
-        assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled'
-
-        result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db)
-        assert result.exit_code == 0
-        assert result.output == 'Enabled\n'
-
-        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db)
-        print(result.output)
-        assert result.exit_code == 0
-        assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled'
-
-        result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db)
-        assert result.exit_code == 0
-        assert result.output == 'Disabled\n'
-
-        result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db)
-        print(result.output)
-        assert result.exit_code != 0
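One pattern in the new `tests/ssdutil_test.py` above is worth a note: `ssdutil.main` imports platform libraries that only exist on an actual switch image, so the test plants mocks in `sys.modules` before importing it. A minimal, runnable sketch of the technique; the `vendor_platform` module name is invented for illustration and is not part of this change:

```
import sys
from unittest.mock import MagicMock

# Install the stub BEFORE the code under test is imported. Python resolves
# imports through sys.modules first, so any later "import vendor_platform"
# (including one inside the module under test) receives the mock.
sys.modules['vendor_platform'] = MagicMock()

import vendor_platform  # noqa: E402 - the late import is the point of the pattern

# Every attribute access on a MagicMock succeeds, so hardware-only calls are safe:
chassis = vendor_platform.Chassis()
chassis.get_sfp(0).get_presence()
```

This is also why the real test carries `# noqa: E402`: the import of `ssdutil.main` has to happen after the stubs are planted rather than at the top of the file.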
diff --git a/tests/vlan_test.py b/tests/vlan_test.py
index 2d3c1dcf1b..fc3569b87d 100644
--- a/tests/vlan_test.py
+++ b/tests/vlan_test.py
@@ -1426,7 +1426,7 @@ def test_config_set_router_port_on_member_interface(self):
                                        ["Ethernet4", "10.10.10.1/24"], obj=obj)
         print(result.exit_code, result.output)
         assert result.exit_code == 0
-        assert 'Interface Ethernet4 is in trunk mode and needs to be in routed mode!' in result.output
+        assert 'Interface Ethernet4 is a member of vlan\nAborting!\n' in result.output
 
     def test_config_vlan_add_member_of_portchannel(self):
         runner = CliRunner()
diff --git a/utilities_common/bgp.py b/utilities_common/bgp.py
new file mode 100644
index 0000000000..640be87ee0
--- /dev/null
+++ b/utilities_common/bgp.py
@@ -0,0 +1,23 @@
+from swsscommon.swsscommon import CFG_BGP_DEVICE_GLOBAL_TABLE_NAME as CFG_BGP_DEVICE_GLOBAL  # noqa
+
+#
+# BGP constants -------------------------------------------------------------------------------------------------------
+#
+
+BGP_DEVICE_GLOBAL_KEY = "STATE"
+
+SYSLOG_IDENTIFIER = "bgp-cli"
+
+
+#
+# BGP helpers ---------------------------------------------------------------------------------------------------------
+#
+
+
+def to_str(state):
+    """ Convert boolean to string representation """
+    if state == "true":
+        return "enabled"
+    elif state == "false":
+        return "disabled"
+    return state
diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py
index 668ef344d5..cb49123c4b 100644
--- a/utilities_common/bgp_util.py
+++ b/utilities_common/bgp_util.py
@@ -299,6 +299,10 @@ def display_bgp_summary(bgp_summary, af):
         af: IPV4 or IPV6
     '''
+
+    # "Neighbhor" is a known typo,
+    # but fixing it would impact lots of automation scripts that community users may have developed over the years;
+    # for now, let's keep it as it is.
     headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down",
                "State/PfxRcd", "NeighborName"]
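The `to_str` helper added in `utilities_common/bgp.py` above is the glue between the CONFIG_DB representation of these flags and the words the BGP CLI prints. Its contract is fully visible in the diff; shown here as a quick illustrative snippet (not part of the change itself):

```
from utilities_common.bgp import to_str

assert to_str("true") == "enabled"    # CONFIG_DB stores the flags as "true"/"false"
assert to_str("false") == "disabled"
assert to_str("N/A") == "N/A"         # any other value passes through unchanged
```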
headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 63336377a8..c8a314b704 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -731,8 +731,7 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False # with a list for next hops if (get_interface_naming_mode() == "alias" and not command_str.startswith("intfutil") and not re.search( "show ip|ipv6 route", command_str)): - run_command_in_alias_mode(command, shell=shell) - sys.exit(0) + return run_command_in_alias_mode(command, shell=shell) proc = subprocess.Popen(command, shell=shell, text=True, stdout=subprocess.PIPE) diff --git a/utilities_common/flock.py b/utilities_common/flock.py new file mode 100644 index 0000000000..c8faa8bfd9 --- /dev/null +++ b/utilities_common/flock.py @@ -0,0 +1,89 @@ +"""File lock utilities.""" +import click +import fcntl +import functools +import inspect +import os +import sys +import time + +from sonic_py_common import logger + + +log = logger.Logger() + + +def acquire_flock(fd, timeout=-1): + """Acquire the flock.""" + flags = fcntl.LOCK_EX + if timeout >= 0: + flags |= fcntl.LOCK_NB + else: + timeout = 0 + + start_time = current_time = time.time() + ret = False + while current_time - start_time <= timeout: + try: + fcntl.flock(fd, flags) + except (IOError, OSError): + ret = False + else: + ret = True + break + current_time = time.time() + if timeout != 0: + time.sleep(0.2) + return ret + + +def release_flock(fd): + """Release the flock.""" + fcntl.flock(fd, fcntl.LOCK_UN) + + +def try_lock(lock_file, timeout=-1): + """Decorator to try lock file using fcntl.flock.""" + def _decorator(func): + @functools.wraps(func) + def _wrapper(*args, **kwargs): + bypass_lock = False + + # Get the bypass_lock argument from the function signature + func_signature = inspect.signature(func) + has_bypass_lock = "bypass_lock" in func_signature.parameters + if has_bypass_lock: + func_ba = func_signature.bind(*args, **kwargs) + func_ba.apply_defaults() + bypass_lock = func_ba.arguments["bypass_lock"] + + if bypass_lock: + click.echo(f"Bypass lock on {lock_file}") + return func(*args, **kwargs) + else: + fd = os.open(lock_file, os.O_CREAT | os.O_RDWR) + if acquire_flock(fd, timeout): + click.echo(f"Acquired lock on {lock_file}") + os.truncate(fd, 0) + # Write pid and the function name to the lock file as a record + os.write(fd, f"{func.__name__}, pid {os.getpid()}\n".encode()) + try: + return func(*args, **kwargs) + finally: + release_flock(fd) + click.echo(f"Released lock on {lock_file}") + os.truncate(fd, 0) + os.close(fd) + else: + click.echo(f"Failed to acquire lock on {lock_file}") + lock_owner = os.read(fd, 1024).decode() + if not lock_owner: + lock_owner = "unknown" + log.log_notice( + (f"{func.__name__} failed to acquire lock on {lock_file}," + f" which is taken by {lock_owner}") + ) + os.close(fd) + sys.exit(1) + return _wrapper + return _decorator diff --git a/utilities_common/general.py b/utilities_common/general.py index 6ed70a46a1..97155532ca 100644 --- a/utilities_common/general.py +++ b/utilities_common/general.py @@ -2,8 +2,11 @@ import importlib.util import sys -from sonic_py_common.multi_asic import is_multi_asic +from sonic_py_common import multi_asic from swsscommon import swsscommon +FEATURE_TABLE = "FEATURE" +FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope' +FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope' def 
load_module_from_source(module_name, file_path): """ @@ -25,7 +28,7 @@ def load_db_config(): - database_global.json for multi asic - database_config.json for single asic ''' - if is_multi_asic(): + if multi_asic.is_multi_asic(): if not swsscommon.SonicDBConfig.isGlobalInit(): swsscommon.SonicDBConfig.load_sonic_global_db_config() else: @@ -39,6 +42,28 @@ def get_optional_value_for_key_in_config_tbl(config_db, port, key, table): return None value = info_dict.get(key, None) - return value + +def get_feature_state_data(config_db, feature): + ''' + Get feature state from FEATURE table from CONFIG_DB. + return global_scope, per_asic_scope + - if feature state is disabled, return "False" for both global_scope and per_asic_scope + - if is not a multi-asic, return feature state for global_scope ("True/False") and + "False" for asic_scope + ''' + global_scope = "False" + asic_scope = "False" + info_dict = {} + info_dict = config_db.get_entry(FEATURE_TABLE, feature) + if info_dict is None: + return global_scope, asic_scope + if multi_asic.is_multi_asic(): + if info_dict['state'].lower() == "enabled": + global_scope = info_dict[FEATURE_HAS_GLOBAL_SCOPE] + asic_scope = info_dict[FEATURE_HAS_PER_ASIC_SCOPE] + else: + if info_dict['state'].lower() == "enabled": + global_scope = "True" + return global_scope, asic_scope
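To close, two usage sketches for the new helpers. All paths, function names, and arguments below are invented for illustration; only `try_lock` and `get_feature_state_data` themselves come from this change. `try_lock` inspects the wrapped function's signature for an optional `bypass_lock` parameter, waits up to `timeout` seconds for the lock, and terminates the process if the lock cannot be taken:

```
from utilities_common.flock import try_lock

@try_lock("/tmp/sonic-demo.lock", timeout=5)       # hypothetical lock file
def install_image(image_path, bypass_lock=False):
    # Critical section: at most one process runs this at a time.
    print(f"installing {image_path}")

install_image("/tmp/sonic.bin")                    # retries for up to 5 seconds
install_image("/tmp/sonic.bin", bypass_lock=True)  # caller explicitly opts out
```

If another process still holds the lock after the timeout, the wrapper prints a failure message, logs the holder recorded in the lock file (`<function name>, pid <pid>`), and exits with status 1. `get_feature_state_data`, in turn, returns both scopes as the strings "True"/"False" rather than booleans, so callers compare strings; a fragment, assuming `config_db` is an already-connected CONFIG_DB handle and "bgp" is just an example feature name:

```
global_scope, per_asic_scope = get_feature_state_data(config_db, "bgp")
if per_asic_scope == "True":
    # walk every ASIC namespace for this feature
    ...
```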