diff --git a/CHANGES.rst b/CHANGES.rst index 929f4ae..ee4bf0c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,17 @@ Version History =============================================================================== +Version: 4.0.8 +------------------------------------------------------------------------------- +ENHANCEMENTS: + +* Enhanced parallel scenario execution by identifying scenarios by their line number instead of their name, so scenarios that change their name at runtime no longer cause issues in parallel executions. + +FIXES: + +* Stopped using the scenario name as part of the hash that generates output paths, since a renamed scenario no longer matched its output path. The hash is now generated from the feature filename and the line where the scenario is located. + + Version: 4.0.7 ------------------------------------------------------------------------------- ENHANCEMENTS: diff --git a/behavex/environment.py b/behavex/environment.py index fe046d1..167c28d 100644 --- a/behavex/environment.py +++ b/behavex/environment.py @@ -17,7 +17,7 @@ from behavex.conf_mgr import get_env, get_param from behavex.global_vars import global_vars from behavex.outputs import report_json, report_xml -from behavex.outputs.report_utils import create_log_path +from behavex.outputs.report_utils import create_log_path, strip_ansi_codes from behavex.utils import (LOGGING_CFG, create_custom_log_when_called, get_autoretry_attempts, get_logging_level, get_scenario_tags, get_scenarios_instances) @@ -122,8 +122,8 @@ def before_scenario(context, scenario): context.bhx_execution_attempts[scenario.name] = 0 execution_attempt = context.bhx_execution_attempts[scenario.name] retrying_execution = True if execution_attempt > 0 else False - concat_feature_and_scenario_name = "{}-{}".format(str(context.feature.name), str(scenario.name)) - context.log_path = create_log_path(concat_feature_and_scenario_name, retrying_execution) + concat_feature_and_scenario_line = "{}-{}".format(str(context.feature.filename), str(scenario.line)) + context.log_path = create_log_path(concat_feature_and_scenario_line, retrying_execution) context.bhx_log_handler = _add_log_handler(context.log_path) if retrying_execution: logging.info('Retrying scenario execution...\n'.format()) @@ -197,6 +197,7 @@ def _add_log_handler(log_path): ) log_level = get_logging_level() logging.getLogger().setLevel(log_level) + file_handler.addFilter(lambda record: setattr(record, 'msg', strip_ansi_codes(str(record.msg))) or True) file_handler.setFormatter(_get_log_formatter()) logging.getLogger().addHandler(file_handler) return file_handler diff --git a/behavex/outputs/jinja/main.jinja2 b/behavex/outputs/jinja/main.jinja2 index 37014f6..f79a679 100644 --- a/behavex/outputs/jinja/main.jinja2 +++ b/behavex/outputs/jinja/main.jinja2 @@ -282,8 +282,8 @@ Evidence {%- for scenario in feature.scenarios -%} - {%- set concat_feature_and_scenario_name = feature.name + "-" + scenario.name -%} - {%- set scenario_hash = concat_feature_and_scenario_name|get_string_hash -%} + {%- set concat_feature_and_scenario_line = feature.filename + "-" + scenario.line|string -%} + {%- set scenario_hash = concat_feature_and_scenario_line|get_string_hash -%} {%- set scenario_hash = scenario_hash|string -%} {%- set scenario_tags = scenario|get_scenario_tags -%} diff --git a/behavex/outputs/jinja_mgr.py b/behavex/outputs/jinja_mgr.py index 0bd0e09..395a1ed 100644 --- a/behavex/outputs/jinja_mgr.py +++ b/behavex/outputs/jinja_mgr.py @@ -120,7 +120,7 @@ def _exist_extra_logs(scenario): def 
get_path_extra_logs(scenario): extra_logs_folder = os.path.join( get_env('logs'), - str(get_string_hash("{}-{}".format(str(scenario["feature"]), str(scenario["name"])))), + str(get_string_hash("{}-{}".format(str(scenario["filename"]), str(scenario["line"])))), 'evidence', ) return extra_logs_folder @@ -132,7 +132,7 @@ def get_relative_extra_logs_path(scenario): [ 'outputs', 'logs', - get_string_hash("{}-{}".format(str(scenario["feature"]), str(scenario["name"]))), + get_string_hash("{}-{}".format(str(scenario["filename"]), str(scenario["line"]))), 'evidence', ] ) diff --git a/behavex/outputs/report_json.py b/behavex/outputs/report_json.py index 295eddb..f8673d0 100644 --- a/behavex/outputs/report_json.py +++ b/behavex/outputs/report_json.py @@ -27,7 +27,8 @@ from behavex.outputs.report_utils import (get_environment_details, get_error_message, match_for_execution, text) -from behavex.utils import get_scenario_tags, try_operate_descriptor +from behavex.utils import (generate_hash, get_scenario_tags, + try_operate_descriptor) def add_step_info(step, parent_node): @@ -159,6 +160,7 @@ def _processing_scenarios(scenarios, scenario_list, id_feature): if match_for_execution(scenario_tags): # Scenario was selectable scenario_info = {} + scenario_info['line'] = getattr(scenario, 'line') scenario_info['name'] = getattr(scenario, 'name') scenario_info['duration'] = getattr(scenario, 'duration') scenario_info['status'] = getattr(scenario, 'status').name @@ -179,7 +181,8 @@ def _processing_scenarios(scenarios, scenario_list, id_feature): scenario_info['error_lines'] = error_lines scenario_info['error_step'] = error_step scenario_info['error_background'] = error_background - scenario_info['id_hash'] = _generate_hash(scenario.name) + scenario_info['id_hash'] = generate_hash("{}:{}".format(scenario.filename, + scenario.line)) if scenario.feature.name in global_vars.retried_scenarios: if ( scenario.name @@ -253,13 +256,9 @@ def _step_to_dict(index, step): def process_step_definition(step, step_info): definition = registry.find_step_definition(step) if definition: - hash_step = _generate_hash(definition.pattern) + hash_step = generate_hash(definition.pattern) if hash_step not in global_vars.steps_definitions: global_vars.steps_definitions[hash_step] = definition.pattern step_info['hash'] = hash_step else: step_info['hash'] = 0 - - -def _generate_hash(word): - return abs(hash(word)) % (10 ** 8) diff --git a/behavex/outputs/report_utils.py b/behavex/outputs/report_utils.py index 2b01d1d..234b276 100644 --- a/behavex/outputs/report_utils.py +++ b/behavex/outputs/report_utils.py @@ -147,10 +147,11 @@ def gather_steps(features): def gather_errors(scenario, retrieve_step_name=False): + error_msg = list(map(lambda line: strip_ansi_codes(line), scenario['error_msg'])) if retrieve_step_name: - return scenario['error_msg'], scenario['error_lines'], scenario['error_step'] + return error_msg, scenario['error_lines'], scenario['error_step'] else: - return scenario['error_msg'], scenario['error_lines'] + return error_msg, scenario['error_lines'] def pretty_print_time(seconds_float, sec_decimals=1): @@ -449,3 +450,6 @@ def get_environment_details(): environment_details_raw_data = os.getenv('ENVIRONMENT_DETAILS', None) environment_details = environment_details_raw_data.split(',') if environment_details_raw_data else [] return environment_details + +def strip_ansi_codes(from_str: str): + return re.sub(r'\x1B\[[0-?9;]*[mGJK]', '', from_str) diff --git a/behavex/runner.py b/behavex/runner.py index 0076f03..a5ef17d 100644 --- 
a/behavex/runner.py +++ b/behavex/runner.py @@ -51,7 +51,7 @@ cleanup_folders, configure_logging, copy_bootstrap_html_generator, create_execution_complete_callback_function, - explore_features, generate_reports, + explore_features, generate_hash, generate_reports, get_json_results, get_logging_level, get_scenario_tags, get_scenarios_instances, get_text, join_feature_reports, @@ -71,7 +71,6 @@ match_include = None include_path_match = None include_name_match = None -scenario_lines = {} def main(): @@ -251,7 +250,7 @@ def launch_behavex(): feature_filename=None, feature_json_skeleton=None, scenarios_to_run_in_feature=None, - scenario_name=None, + scenario_line=None, multiprocess=False, config=config, lock=None, @@ -286,11 +285,10 @@ def launch_behavex(): results = get_json_results() processed_feature_filenames = [] if results: - failures = {} + failures = [] for feature in results['features']: processed_feature_filenames.append(feature['filename']) filename = feature['filename'] - failures[filename] = [] if feature['status'] == 'failed': totals['features']['failed'] += 1 elif feature['status'] == 'passed': @@ -304,7 +302,7 @@ def launch_behavex(): for scenario in feature['scenarios']: if scenario['status'] == 'failed': totals['scenarios']['failed'] += 1 - failures[filename].append(scenario['name']) + failures.append('{}:{}'.format(filename, scenario['line'])) if 'MUTE' not in scenario['tags']: failing_non_muted_tests = True elif scenario['status'] == 'passed': @@ -316,8 +314,7 @@ def launch_behavex(): if failures: failures_file_path = os.path.join(get_env('OUTPUT'), global_vars.report_filenames['report_failures']) with open(failures_file_path, 'w') as failures_file: - parameters = create_test_list(failures) - failures_file.write(parameters) + failures_file.write(','.join(failures)) # Calculates final exit code. execution_codes is 1 only if an execution exception arises if isinstance(execution_codes, list): execution_failed = True if sum(execution_codes) > 0 else False @@ -375,23 +372,6 @@ def notify_missing_features(features_path): print_parallel('path.not_found', os.path.realpath(include_path)) -def create_test_list(test_list): - """Create a list of tests to run. - - Args: - test_list (dict): Dictionary of features and their scenarios. - - Returns: - str: Comma-separated list of test paths. - """ - paths = [] - sce_lines = get_env('scenario_lines') - for feature, scenarios in test_list.items(): - for scenario_name in scenarios: - paths.append('{}:{}'.format(feature, sce_lines[feature][scenario_name])) - return ','.join(paths) - - def create_scenario_line_references(features): """Create references for scenario lines in the features. @@ -401,13 +381,10 @@ def create_scenario_line_references(features): Returns: dict: Updated features with scenario line references. 
""" - sce_lines = {} updated_features = {} for feature_path, scenarios in features.items(): for scenario in scenarios: scenario_filename = text(scenario.filename) - if scenario_filename not in sce_lines: - sce_lines[scenario_filename] = {} if global_vars.rerun_failures or ".feature:" in feature_path: feature_without_scenario_line = feature_path.split(":")[0] if feature_without_scenario_line not in updated_features: @@ -417,27 +394,22 @@ def create_scenario_line_references(features): if scenario_outline_instance.line == int(feature_path.split(":")[1]): if scenario_outline_instance not in updated_features[feature_without_scenario_line]: updated_features[feature_without_scenario_line].append(scenario_outline_instance) - sce_lines[scenario_filename][scenario_outline_instance.name] = scenario_outline_instance.line break else: if scenario.line == int(feature_path.split(":")[1]): if scenario not in updated_features[feature_without_scenario_line]: updated_features[feature_without_scenario_line].append(scenario) - sce_lines[scenario_filename][scenario.name] = scenario.line else: updated_features_path = scenario.feature.filename if updated_features_path not in updated_features: updated_features[updated_features_path] = [] if isinstance(scenario, ScenarioOutline): for scenario_outline_instance in scenario.scenarios: - sce_lines[scenario_filename][scenario_outline_instance.name] = scenario_outline_instance.line if scenario_outline_instance not in updated_features[updated_features_path]: updated_features[updated_features_path].append(scenario_outline_instance) else: - sce_lines[scenario_filename][scenario.name] = scenario.line if scenario not in updated_features[updated_features_path]: updated_features[updated_features_path].append(scenario) - set_env('scenario_lines', sce_lines) return updated_features @@ -481,7 +453,7 @@ def launch_by_feature(features, feature_filename=serial_feature["feature_filename"], feature_json_skeleton=serial_feature["feature_json_skeleton"], scenarios_to_run_in_feature=None, - scenario_name=None, + scenario_line=None, multiprocess=True, config=ConfigRun(), lock=None, @@ -500,7 +472,7 @@ def launch_by_feature(features, feature_filename=feature_filename, feature_json_skeleton=feature_json_skeleton, scenarios_to_run_in_feature=None, - scenario_name=None, + scenario_line=None, multiprocess=True, config=ConfigRun(), lock=lock, @@ -553,7 +525,7 @@ def launch_by_scenario(features, feature_filename = scenario.feature.filename scenario_information = {"feature_filename": feature_filename, "feature_json_skeleton": feature_json_skeleton, - "scenario_name": scenario.name} + "scenario_line": scenario.line} total_scenarios_to_run[feature_filename] = total_scenarios_to_run.setdefault(feature_filename, 0) + 1 if 'SERIAL' in scenario_tags: for key, list_scenarios in serial_scenarios.items(): @@ -587,7 +559,7 @@ def launch_by_scenario(features, feature_filename=scen_info["feature_filename"], feature_json_skeleton=scen_info["feature_json_skeleton"], scenarios_to_run_in_feature=scenarios_to_run_in_feature, - scenario_name=scen_info["scenario_name"], + scenario_line=scen_info["scenario_line"], multiprocess=True, config=ConfigRun(), lock=None, @@ -604,13 +576,13 @@ def launch_by_scenario(features, scenarios_to_run_in_feature = total_scenarios_to_run[scenario_information["feature_filename"]] feature_filename = scenario_information["feature_filename"] feature_json_skeleton = scenario_information["feature_json_skeleton"] - scenario_name = scenario_information["scenario_name"] + scenario_line = 
scenario_information["scenario_line"] future = process_pool.submit(execute_tests, features_path=features_path, feature_filename=feature_filename, feature_json_skeleton=feature_json_skeleton, scenarios_to_run_in_feature=scenarios_to_run_in_feature, - scenario_name=scenario_name, + scenario_line=scenario_line, multiprocess=True, config=ConfigRun(), lock=lock, @@ -632,7 +604,7 @@ def execute_tests( feature_filename, feature_json_skeleton, scenarios_to_run_in_feature, - scenario_name, + scenario_line, multiprocess, config, lock, @@ -645,7 +617,7 @@ def execute_tests( feature_filename (str): Name of the feature file. feature_json_skeleton (str): JSON skeleton of the feature. scenarios_to_run_in_feature (int): Number of scenarios to run in the feature. - scenario_name (str): Name of the scenario. + scenario_line (int): Line of the scenario. multiprocess (bool): Whether to use multiprocessing. config (ConfigRun): Configuration object. lock (Lock): Multiprocessing lock. @@ -667,7 +639,7 @@ def execute_tests( multiprocess=multiprocess, execution_id=execution_id, feature=feature_filename, - scenario=scenario_name, + scenario_line=scenario_line, config=config) except Exception as exception: traceback.print_exc() @@ -682,9 +654,9 @@ def execute_tests( 'features': [json.loads(feature_json_skeleton)], 'steps_definition': []} for skeleton_feature in json_output["features"]: - if scenario_name: + if scenario_line: for skeleton_scenario in skeleton_feature["scenarios"]: - if scenario_name_matching(scenario_name, skeleton_scenario['name']): + if str(skeleton_scenario['line']) == str(scenario_line): skeleton_scenario['status'] = 'failed' skeleton_scenario['error_msg'] = get_text('scenario.execution_crashed') else: @@ -697,16 +669,16 @@ def execute_tests( json_output = {'environment': [], 'features': [], 'steps_definition': []} else: json_output = dump_json_results() - if scenario_name: + if scenario_line: json_output['features'] = filter_feature_executed(json_output, text(feature_filename), - scenario_name) + scenario_line=scenario_line) if len(json_output['features']) == 0 or len(json_output['features'][0]['scenarios']) == 0: # Adding scenario data if the test was removed from the execution (setting it as "Untested") json_output['features'] = [json.loads(feature_json_skeleton)] try: processing_xml_feature(json_output=json_output, - scenario=scenario_name, + scenario_line=scenario_line, feature_filename=feature_filename, scenarios_to_run_in_feature=scenarios_to_run_in_feature, lock=lock, @@ -721,20 +693,20 @@ def execute_tests( raise -def filter_feature_executed(json_output, filename, scenario_name): +def filter_feature_executed(json_output, filename, scenario_line): """ Filter the executed feature from the JSON output. Args: json_output (dict): JSON output of the test execution. filename (str): Name of the feature file. - scenario_name (str): Name of the scenario. + scenario_line (str): Line of the scenario. 
""" for feature in json_output.get('features', '')[:]: if feature.get('filename', '') == filename: mapping_scenarios = [] for scenario in feature['scenarios']: - if scenario_name_matching(scenario_name, scenario['name']): + if str(scenario['line']) == str(scenario_line): mapping_scenarios.append(scenario) feature['scenarios'] = mapping_scenarios return [feature] @@ -881,7 +853,7 @@ def remove_temporary_files(parallel_processes, json_reports): print(remove_ex) -def processing_xml_feature(json_output, scenario, feature_filename, +def processing_xml_feature(json_output, scenario_line, feature_filename, scenarios_to_run_in_feature=None, lock=None, shared_removed_scenarios=None): """ @@ -911,9 +883,7 @@ def processing_xml_feature(json_output, scenario, feature_filename, reported_scenarios = json_output['features'][0]['scenarios'] executed_scenarios = [] for reported_scenario in reported_scenarios: - reported_name = reported_scenario['name'] - if reported_name == scenario or ('@' in reported_name and - scenario_name_matching(scenario, reported_name)): + if reported_scenario['line'] == scenario_line: executed_scenarios.append(reported_scenario) json_output['features'][0]['scenarios'] = executed_scenarios feature_name = os.path.join( @@ -1031,7 +1001,7 @@ def _store_tags_to_env_variable(tags): set_env_variable('TAGS', '') -def _set_behave_arguments(features_path, multiprocess, execution_id=None, feature=None, scenario=None, config=None): +def _set_behave_arguments(features_path, multiprocess, execution_id=None, feature=None, scenario_line=None, config=None): """ Set the arguments for Behave framework based on the given parameters. @@ -1040,7 +1010,7 @@ def _set_behave_arguments(features_path, multiprocess, execution_id=None, featur multiprocess (bool): Whether to use multiprocessing. execution_id (str): Execution ID. feature (Feature): Feature object. - scenario (Scenario): Scenario object. + scenario_line (int): Scenario line. config (ConfigRun): Configuration object. 
Returns: @@ -1049,24 +1019,10 @@ def _set_behave_arguments(features_path, multiprocess, execution_id=None, featur arguments = [] output_folder = config.get_env('OUTPUT') if multiprocess: - if not feature: - arguments.append(features_path) - else: - arguments.append(feature) + updated_features_path = features_path if not feature else feature + updated_features_path = updated_features_path if not scenario_line else "{}:{}".format(updated_features_path, scenario_line) + arguments.append(updated_features_path) arguments.append('--no-summary') - if scenario: - outline_examples_in_name = re.findall('<[\\S]*>', scenario) - pattern = "(.?--.?@\\d+.\\d+\\s*\\S*)" - if bool(re.search(pattern, scenario)): - scenario_outline_compatible = '^{}$'.format(re.escape(scenario)) - else: - scenario_outline_compatible = '^{}{}?$'.format(re.escape(scenario), pattern) - if outline_examples_in_name: - for example_name in outline_examples_in_name: - escaped_example_name = re.escape(example_name) - scenario_outline_compatible = scenario_outline_compatible.replace(escaped_example_name, "[\\S ]*") - arguments.append('--name') - arguments.append("{}".format(scenario_outline_compatible)) worker_id = multiprocessing.current_process().name.split('-')[-1] arguments.append('--outfile') @@ -1192,26 +1148,6 @@ def set_args_captures(args, args_sys): args.append('--no-{}'.format(default_arg.replace('_', '-'))) -def scenario_name_matching(abstract_scenario_name, scenario_name): - """ - Check if the scenario name matches the abstract scenario name (as the scenario might represent a Scenario Outline, with example parameters in name). - - Args: - abstract_scenario_name (str): Abstract scenario name - scenario_name (str): Scenario name to map against the abstract scenario name. - - Returns: - bool: Whether the scenario name matches the abstract scenario name. - """ - outline_examples_in_name = re.findall('<\\S*>', abstract_scenario_name) - scenario_outline_compatible = '^{}(.--.@\\d+.\\d+\\s*\\S*)?$'.format(re.escape(abstract_scenario_name)) - for example_name in outline_examples_in_name: - escaped_example_name = re.escape(example_name) - scenario_outline_compatible = scenario_outline_compatible.replace(escaped_example_name, "[\\S ]*") - pattern = re.compile(scenario_outline_compatible) - return pattern.match(scenario_name) - - def dump_json_results(): """ Dump the JSON results of the test execution. 
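The runner changes above replace name-based selection (the removed scenario_name_matching and the --name regex construction) with a "<feature path>:<line>" target, which Behave accepts for running a single scenario and which stays stable even if the scenario renames itself at runtime. A minimal, illustrative sketch of that selection logic follows; the helper name and paths are made up for illustration and are not part of the patch:

# Illustrative sketch of line-based scenario selection (not BehaveX internals).
def behave_target(features_path, feature=None, scenario_line=None):
    # Mirrors the 4.0.8 logic in _set_behave_arguments: prefer the feature
    # file when given, then append the scenario line to pin one scenario.
    target = features_path if not feature else feature
    if scenario_line:
        target = "{}:{}".format(target, scenario_line)
    return [target, '--no-summary']

# Hypothetical usage:
print(behave_target('tests/features', feature='tests/features/sample.feature', scenario_line=12))
# ['tests/features/sample.feature:12', '--no-summary']
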
diff --git a/behavex/utils.py b/behavex/utils.py index c6f4707..6c8d4dc 100644 --- a/behavex/utils.py +++ b/behavex/utils.py @@ -9,6 +9,7 @@ import codecs import functools +import hashlib import json import logging import multiprocessing @@ -81,7 +82,6 @@ def get_logging_level(): # noinspection PyDictCreation def join_feature_reports(json_reports): - scenario_lines = get_env('scenario_lines') if type(json_reports) is list: if len(json_reports) == 1: merged_json = json_reports[0] @@ -95,13 +95,12 @@ def join_feature_reports(json_reports): if merged_json['features'] and (IncludeNameMatch().bool() or IncludePathsMatch().bool() or MatchInclude().bool()): delete = [] for index, feature in enumerate(merged_json['features'][:]): - lines = scenario_lines.get(feature['filename'], {}) scenarios = [ scenario for scenario in feature['scenarios'] if IncludeNameMatch()(scenario['name']) and MatchInclude()(feature['filename']) - and IncludePathsMatch()(scenario['filename'], lines.get(scenario['name'], -1)) + and IncludePathsMatch()(scenario['filename'], scenario['line']) ] if not scenarios: # create index list for delete after iterated the feature list. @@ -466,7 +465,7 @@ def create_custom_log_when_called(self, key): if not hasattr(self, 'scenario'): ex_msg = '"evidence_path" is only accessible in the context of a test scenario' raise Exception(ex_msg) - self.log_path = get_string_hash("{}-{}".format(str(self.feature.name), str(self.scenario.name))) + self.log_path = get_string_hash("{}-{}".format(str(self.feature.filename), str(self.scenario.line))) evidence_path = os.path.join(self.log_path, 'evidence') self.evidence_path = evidence_path try: @@ -574,3 +573,13 @@ def get_autoretry_attempts(tags): attempts_in_tag = result.group(2) attempts = int(attempts_in_tag) if attempts_in_tag else 2 return attempts + +def generate_hash(word): + # Use SHA-256 for better distribution + sha256 = hashlib.sha256(word.encode('utf-8')).digest() + # Take the first 6 bytes (48 bits) of the hash + truncated_hash = sha256[:6] + # Convert to an integer + hash_int = int.from_bytes(truncated_hash, byteorder='big') + # Ensure the result fits in 48 bits (optional, for consistency) + return hash_int & 0xFFFFFFFFFFFF diff --git a/pyproject.toml b/pyproject.toml index 95c913a..06a4851 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "behavex" -version = "4.0.7" +version = "4.0.8" description = "Agile testing framework on top of Behave (BDD)." 
readme = "README.md" license = { text = "MIT" } diff --git a/setup.py b/setup.py index 8fefe39..f973f25 100644 --- a/setup.py +++ b/setup.py @@ -6,7 +6,7 @@ setup( name='behavex', - version='4.0.7', + version='4.0.8', license="MIT", platforms=['any'], python_requires='>=3.5', diff --git a/tests/features/crashing_tests.feature b/tests/features/crashing_tests.feature index d6d9d83..6832bc1 100644 --- a/tests/features/crashing_tests.feature +++ b/tests/features/crashing_tests.feature @@ -13,7 +13,8 @@ Feature: Crashing Tests @CRASHING Scenario: Crashing tests with parallel processes and parallel scheme set as "scenario" should be reported Given I have installed behavex - When I run the behavex command with a crashing test with "2" parallel processes and parallel scheme set as "scenario" + When I setup the behavex command with "2" parallel processes and parallel scheme set as "scenario" + And I run the behavex command with a crashing test Then I should see the following behavex console outputs and exit code "1" | output_line | | Exit code: 1 | @@ -23,7 +24,8 @@ Feature: Crashing Tests @CRASHING Scenario: Crashing tests with parallel processes and parallel scheme set as "feature" should be reported Given I have installed behavex - When I run the behavex command with a crashing test with "2" parallel processes and parallel scheme set as "feature" + When I setup the behavex command with "2" parallel processes and parallel scheme set as "feature" + And I run the behavex command with a crashing test Then I should see the following behavex console outputs and exit code "1" | output_line | | Exit code: 1 | diff --git a/tests/features/failing_scenarios.feature b/tests/features/failing_scenarios.feature index d404065..c8740b9 100644 --- a/tests/features/failing_scenarios.feature +++ b/tests/features/failing_scenarios.feature @@ -9,3 +9,4 @@ Feature: Failing Scenarios | 0 scenarios passed, 1 failed, 0 skipped | | Exit code: 1 | And I should not see exception messages in the output + And I should see the same number of scenarios in the reports and the console output diff --git a/tests/features/parallel_executions.feature b/tests/features/parallel_executions.feature index 614ca76..1fab49f 100644 --- a/tests/features/parallel_executions.feature +++ b/tests/features/parallel_executions.feature @@ -10,6 +10,7 @@ Feature: Parallel executions | PARALLEL_SCHEME \| | | Exit code: 1 | And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output Examples: | parallel_scheme | parallel_processes | | scenario | 3 | @@ -29,6 +30,7 @@ Feature: Parallel executions | Exit code: 0 | | 1 scenario passed, 0 failed | And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output Examples: | parallel_scheme | parallel_processes | tags | | scenario | 3 | -t=@PASSING_TAG_3 -t=@PASSING_TAG_3_1 | diff --git a/tests/features/passing_scenarios.feature b/tests/features/passing_scenarios.feature index 6db552c..4ed05d0 100644 --- a/tests/features/passing_scenarios.feature +++ b/tests/features/passing_scenarios.feature @@ -9,7 +9,7 @@ Feature: Passing Scenarios | scenarios passed, 0 failed, 0 skipped | | Exit code: 0 | And I should not see error messages in the output - + And I should see the same number of scenarios in the reports and the console output @PASSING Scenario: Passing tests with AND tags @@ -22,6 +22,7 @@ Feature: Passing Scenarios | 1 scenario passed, 0 failed | | Exit code: 0 
| And I should not see error messages in the output + And I should see the same number of scenarios in the reports @PASSING @WIP Scenario: Passing tests with NOT tags @@ -34,3 +35,4 @@ Feature: Passing Scenarios | 1 scenario passed, 0 failed | | Exit code: 0 | And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output diff --git a/tests/features/progress_bar.feature b/tests/features/progress_bar.feature index 772eba3..9d78e7e 100644 --- a/tests/features/progress_bar.feature +++ b/tests/features/progress_bar.feature @@ -2,7 +2,7 @@ Feature: Progress Bar Background: Given I have installed behavex - And I have the progress bar enabled + And The progress bar is enabled @PROGRESS_BAR @PARALLEL Scenario Outline: Progress bar should be shown when running tests in parallel diff --git a/tests/features/renaming_scenarios.feature b/tests/features/renaming_scenarios.feature new file mode 100644 index 0000000..b39cc2a --- /dev/null +++ b/tests/features/renaming_scenarios.feature @@ -0,0 +1,28 @@ +Feature: Renaming Scenarios + + @RENAME + Scenario: Renaming scenarios and features + Given I have installed behavex + When I run the behavex command that renames scenarios and features + Then I should see the following behavex console outputs and exit code "0" + | output_line | + | scenarios passed, 0 failed, 0 skipped | + | Exit code: 0 | + And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output + + @RENAME + Scenario Outline: Renaming scenarios and features in parallel by scheme + Given I have installed behavex + When I setup the behavex command with "<parallel_processes>" parallel processes and parallel scheme set as "<parallel_scheme>" + And I run the behavex command that renames scenarios and features + Then I should see the following behavex console outputs and exit code "0" + | output_line | + | scenarios passed, 0 failed, 0 skipped | + | Exit code: 0 | + And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output + Examples: + | parallel_scheme | parallel_processes | + | scenario | 3 | + | feature | 2 | diff --git a/tests/features/secondary_features/environment.py b/tests/features/secondary_features/environment.py new file mode 100644 index 0000000..613ceb1 --- /dev/null +++ b/tests/features/secondary_features/environment.py @@ -0,0 +1,6 @@ + +def after_scenario(context, scenario): + if hasattr(context, 'new_scenario_name'): + scenario.name = context.new_scenario_name + if hasattr(context, 'new_feature_name'): + scenario.feature.name = context.new_feature_name diff --git a/tests/features/secondary_features/rename_tests.feature b/tests/features/secondary_features/rename_tests.feature new file mode 100644 index 0000000..21ba974 --- /dev/null +++ b/tests/features/secondary_features/rename_tests.feature @@ -0,0 +1,18 @@ +Feature: Rename Tests + + Scenario: This scenario changes the name of the scenario by adding a suffix + Given I rename the scenario from context to have the suffix " - RENAMED" + + Scenario Outline: This scenario changes the name of the scenario outline by adding a suffix + Given I rename the scenario from context to have the suffix "<suffix>" + Examples: EXAMPLES_TITLE + | suffix | + | - RENAMED_1 | + | - RENAMED_2 | + Examples: "EXAMPLES_TITLE2" + | suffix | + | - RENAMED_3 | + | - RENAMED_4 | + + Scenario: This scenario changes the name of the feature by adding a suffix + Given I rename the feature 
from context to have the suffix " - RENAMED" diff --git a/tests/features/secondary_features/steps/secondary_steps.py b/tests/features/secondary_features/steps/secondary_steps.py index 3f2cdaa..ef69d35 100644 --- a/tests/features/secondary_features/steps/secondary_steps.py +++ b/tests/features/secondary_features/steps/secondary_steps.py @@ -1,25 +1,32 @@ +import logging + from behave import given, then, when @given('a failing condition') def step_impl(context): context.condition = 'fail' + logging.info('a failing condition') @given('a passing condition') def step_impl(context): context.condition = 'pass' + logging.info('a passing condition') @given('a condition to skip the scenario') def step_impl(context): context.condition = 'skip' + logging.info('a condition to skip the scenario') @given('a condition to exit the scenario') def step_impl(context): context.condition = 'exit' + logging.info('a condition to exit the scenario') @given('a condition to leave the scenario untested') def step_impl(context): context.condition = 'untested' + logging.info('a condition to leave the scenario untested') @then('I perform the condition') def step_impl(context): @@ -38,3 +45,14 @@ def step_impl(context): elif context.condition == 'untested': # This step will be skipped pass + +@given('I rename the {feature_or_scenario} from context to have the suffix "{suffix}"') +def step_impl(context, feature_or_scenario, suffix): + if feature_or_scenario == 'feature': + context.new_feature_name = context.feature.name + suffix + logging.info('I rename the feature from \n"{}" \nto \n"{}"'.format(context.feature.name, context.new_feature_name)) + elif feature_or_scenario == 'scenario': + context.new_scenario_name = context.scenario.name + suffix + logging.info('I rename the scenario from \n"{}" \nto \n"{}"'.format(context.scenario.name, context.new_scenario_name)) + else: + raise ValueError('Invalid element, it should be "feature" or "scenario"') diff --git a/tests/features/skipped_scenarios.feature b/tests/features/skipped_scenarios.feature index 3c43194..1065a91 100644 --- a/tests/features/skipped_scenarios.feature +++ b/tests/features/skipped_scenarios.feature @@ -9,3 +9,4 @@ Feature: Skipped Scenarios | 0 scenarios passed, 0 failed, 1 skipped | | Exit code: 0 And I should not see error messages in the output + And I should see the same number of scenarios in the reports and the console output diff --git a/tests/features/steps/execution_steps.py b/tests/features/steps/execution_steps.py index 48f8c7a..7abd758 100644 --- a/tests/features/steps/execution_steps.py +++ b/tests/features/steps/execution_steps.py @@ -1,6 +1,7 @@ import logging import os import random +import re import subprocess from behave import given, then, when @@ -8,59 +9,79 @@ root_project_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) tests_features_path = os.path.join(root_project_path, 'tests', 'features') -@given('I have the progress bar enabled') + +@given('The progress bar is enabled') def step_impl(context): context.progress_bar = True + @when('I run the behavex command with a passing test') @when('I run the behavex command with passing tests') def step_impl(context): - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/passing_tests.feature'), '-o', 'output/output_{}'.format(get_random_number(6))] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/passing_tests.feature'), 
'-o', context.output_path] + execute_command(context, execution_args) + + +@when('I run the behavex command that renames scenarios and features') +def step_impl(context): + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/rename_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) + @when('I run the behavex command with a failing test') def step_impl(context): - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/failing_tests.feature'), '-o', 'output/output_{}'.format(get_random_number(6))] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/failing_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with a crashing test') -@when('I run the behavex command with a crashing test with "{parallel_processes}" parallel processes and parallel scheme set as "{parallel_scheme}"') def step_impl(context, parallel_processes="1", parallel_scheme='scenario'): + context.output_path = 'output/output_{}'.format(get_random_number(6)) execution_args = ['behavex', os.path.join(tests_features_path, os.path.join(tests_features_path, 'crashing_features/crashing_tests.feature')), - '-o', 'output/output_{}'.format(get_random_number(6)), - '--parallel-processes', parallel_processes, - '--parallel-scheme', parallel_scheme] + '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with a skipped test') def step_impl(context): - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/skipped_tests.feature'), '-o', 'output/output_{}'.format(get_random_number(6))] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/skipped_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with an untested test') def step_impl(context): - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/untested_tests.feature'), '-o', 'output/output_{}'.format(get_random_number(6))] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/untested_tests.feature'), '-o', context.output_path] execute_command(context, execution_args) @when('I run the behavex command with "{parallel_processes}" parallel processes and parallel scheme set as "{parallel_schema}"') def step_impl(context, parallel_processes, parallel_schema): - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', 'output/output_{}'.format(get_random_number(6)), '--parallel-processes', parallel_processes, '--parallel-scheme', parallel_schema] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--parallel-processes', parallel_processes, '--parallel-scheme', parallel_schema] execute_command(context, execution_args) +@when('I setup the behavex command with "{parallel_processes}" parallel processes and parallel scheme set as "{parallel_scheme}"') +def step_impl(context, parallel_processes, parallel_scheme): + 
context.parallel_processes = parallel_processes + context.parallel_scheme = parallel_scheme + + @when('I run the behavex command with the following scheme, processes and tags') def step_impl(context): scheme = context.table[0]['parallel_scheme'] processes = context.table[0]['parallel_processes'] tags = context.table[0]['tags'] + context.output_path = 'output/output_{}'.format(get_random_number(6)) tags_to_folder_name = get_tags_string(tags) tags_array = get_tags_arguments(tags) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', 'output/output_{}'.format(get_random_number(6)), '--parallel-processes', processes, '--parallel-scheme', scheme] + tags_array + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--parallel-processes', processes, '--parallel-scheme', scheme] + tags_array execute_command(context, execution_args) @@ -69,14 +90,16 @@ def step_impl(context): tags = context.table[0]['tags'] tags_to_folder_name = get_tags_string(tags) tags_array = get_tags_arguments(tags) - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', 'output/output_{}'.format(get_random_number(6))] + tags_array + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path] + tags_array execute_command(context, execution_args) @when('I run the behavex command by performing a dry run') def step_impl(context): # generate a random number between 1 and 1000000 completing with zeroes to 6 digits - execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', 'output/output_{}'.format(get_random_number(6)), '--dry-run'] + context.output_path = 'output/output_{}'.format(get_random_number(6)) + execution_args = ['behavex', os.path.join(tests_features_path, 'secondary_features/'), '-o', context.output_path, '--dry-run'] execute_command(context, execution_args) @@ -95,6 +118,7 @@ def step_impl(context): for message in error_messages: assert message not in context.result.stdout.lower(), f"Unexpected output: {context.result.stdout}" + @then('I should not see exception messages in the output') def step_impl(context): exception_messages = ["exception", "traceback"] @@ -102,21 +126,81 @@ def step_impl(context): assert message not in context.result.stdout.lower(), f"Unexpected output: {context.result.stdout}" + +@then('I should see the same number of scenarios in the reports and the console output') +def step_impl(context): + total_scenarios_in_html_report = get_total_scenarios_in_html_report(context) + logging.info(f"Total scenarios in the HTML report: {total_scenarios_in_html_report}") + total_scenarios_in_junit_reports = get_total_scenarios_in_junit_reports(context) + logging.info(f"Total scenarios in the JUnit reports: {total_scenarios_in_junit_reports}") + total_scenarios_in_console_output = get_total_scenarios_in_console_output(context) + logging.info(f"Total scenarios in the console output: {total_scenarios_in_console_output}") + assert total_scenarios_in_html_report == total_scenarios_in_junit_reports == total_scenarios_in_console_output, f"Scenario count mismatch: {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports, and {total_scenarios_in_console_output} in the console output" + + +@then('I should see the same 
number of scenarios in the reports') +def step_impl(context): + total_scenarios_in_html_report = get_total_scenarios_in_html_report(context) + logging.info(f"Total scenarios in the HTML report: {total_scenarios_in_html_report}") + total_scenarios_in_junit_reports = get_total_scenarios_in_junit_reports(context) + logging.info(f"Total scenarios in the JUnit reports: {total_scenarios_in_junit_reports}") + assert total_scenarios_in_html_report == total_scenarios_in_junit_reports, f"Scenario count mismatch: {total_scenarios_in_html_report} in the HTML report, {total_scenarios_in_junit_reports} in the JUnit reports" + + def get_tags_arguments(tags): tags_array = [] for tag in tags.split(' '): tags_array += tag.split('=') return tags_array + def get_tags_string(tags): return tags.replace('-t=','_AND_').replace('~','NOT_').replace(',','_OR_').replace(' ','').replace('@','') + def get_random_number(total_digits): return str(random.randint(1, 1000000)).zfill(total_digits) -def execute_command(context, command, print_output=True): + +def get_total_scenarios_in_console_output(context): + # Verifying the scenarios in the console output + console_output = context.result.stdout + # Extract the number of scenarios by analyzing the following pattern: X scenarios passed, Y failed, Z skipped + scenario_pattern = re.compile(r'(\d+) scenario.? passed, (\d+) failed, (\d+) skipped') + match = scenario_pattern.search(console_output) + if match: + scenarios_passed = int(match.group(1)) + scenarios_failed = int(match.group(2)) + scenarios_skipped = int(match.group(3)) + else: + raise ValueError("No scenarios found in the console output") + return scenarios_passed + scenarios_failed + scenarios_skipped + + +def get_total_scenarios_in_html_report(context): + report_path = os.path.abspath(os.path.join(context.output_path, 'report.html')) + with open(report_path, 'r') as file: + html_content = file.read() + return html_content.count('data-scenario-tags=') + + +def get_total_scenarios_in_junit_reports(context): + junit_folder = os.path.abspath(os.path.join(context.output_path, 'behave')) + total_scenarios_in_junit_reports = 0 + for file in os.listdir(junit_folder): + if file.endswith('.xml'): + with open(os.path.join(junit_folder, file), 'r') as file: + xml_content = file.read() + total_scenarios_in_junit_reports += xml_content.count('
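The id_hash and output-path changes above rely on hashing a file-and-line reference rather than a scenario name. A small, self-contained sketch of that scheme is shown below; the function body mirrors the generate_hash helper added in behavex/utils.py, while the scenario reference is hypothetical and only for illustration:

import hashlib

def generate_hash(word):
    # Same scheme as the new behavex.utils.generate_hash: first 6 bytes
    # (48 bits) of the SHA-256 digest, read as a big-endian integer.
    sha256 = hashlib.sha256(word.encode('utf-8')).digest()
    return int.from_bytes(sha256[:6], byteorder='big') & 0xFFFFFFFFFFFF

# A scenario is now identified by feature file and line, not by name, so
# renaming it in after_scenario() does not change its hash or log folder.
scenario_ref = 'tests/features/sample.feature:12'  # hypothetical reference
print(generate_hash(scenario_ref))  # stable 48-bit integer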