diff --git a/features/GEM003_Unique-representation-identifier.feature b/features/GEM003_Unique-representation-identifier.feature
index 18715188..1cef0ba6 100644
--- a/features/GEM003_Unique-representation-identifier.feature
+++ b/features/GEM003_Unique-representation-identifier.feature
@@ -12,4 +12,4 @@ The rule verifies that Shape Representation identifier is unique within the prod
     Given Its attribute Representations
     Given its attribute RepresentationIdentifier
-    Then The values must be unique
\ No newline at end of file
+    Then The values must be unique at depth 1
diff --git a/features/GRF001_Identical-coordinate-operations.feature b/features/GRF001_Identical-coordinate-operations.feature
index 259292c9..b99de30b 100644
--- a/features/GRF001_Identical-coordinate-operations.feature
+++ b/features/GRF001_Identical-coordinate-operations.feature
@@ -15,4 +15,4 @@ Currently, for GRF001, this is only permitted if (1) there is a single context o
     Given Its Attribute HasCoordinateOperation
     Given Its values excluding SourceCRS
-    Then The values must be identical at depth 1
+    Then The values must be identical at depth 2
diff --git a/features/steps/thens/values.py b/features/steps/thens/values.py
index eaa784e8..e7fa42d8 100644
--- a/features/steps/thens/values.py
+++ b/features/steps/thens/values.py
@@ -12,9 +12,9 @@ from parse_type import TypeBuilder
 from utils import misc
 
-register_type(unique_or_identical=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("be unique", "be identical"))))) # todo @gh remove 'be' from enum values
-register_type(value_or_type=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("value", "type"))))) # todo @gh remove 'be' from enum values
-register_type(values_or_types=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("values", "types"))))) # todo @gh remove 'be' from enum values
+register_type(unique_or_identical=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("unique", "identical")))))
+register_type(value_or_type=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("value", "type")))))
+register_type(values_or_types=TypeBuilder.make_enum(dict(map(lambda x: (x, x), ("values", "types")))))
 
 def apply_is_a(inst):
     if isinstance(inst, (list, tuple)):
@@ -59,32 +59,20 @@ def step_impl(context, inst, constraint, num):
         yield ValidationOutcome(inst=inst, expected= constraint, observed = f"Not {constraint}", severity=OutcomeSeverity.ERROR)
 
-@gherkin_ifc.step("The {value} must {constraint:unique_or_identical}")
-@gherkin_ifc.step("The values must {constraint:unique_or_identical}")
-@gherkin_ifc.step("The values must {constraint:unique_or_identical} at depth 1")
-def step_impl(context, inst, constraint, num=None):
-
-    within_model = getattr(context, 'within_model', False)
-
-    #to account for order-dependency of removing characters from constraint
-    while constraint.startswith('be ') or constraint.startswith('in '):
-        constraint = constraint[3:]
-
-    instances = [context.instances] if within_model else context.instances
-
-    if constraint in ('identical', 'unique'):
-        for i, values in enumerate(instances):
-            if not values:
-                continue
-            if constraint == 'identical':
-                if not all([values[0] == i for i in values]):
-                    yield ValidationOutcome(inst=inst, expected= constraint, observed = f"Not {constraint}", severity=OutcomeSeverity.ERROR)
-            if constraint == 'unique':
-                seen = set()
-                duplicates = [x for x in values if x in seen or seen.add(x)]
-                if not duplicates:
-                    continue
-                yield ValidationOutcome(inst=inst, expected= constraint, observed = f"Not {constraint}", severity=OutcomeSeverity.ERROR)
+@gherkin_ifc.step("The values must be {constraint:unique_or_identical} at depth {depth_level:d}")
+def step_impl(context, inst, constraint, depth_level=None):
+    if not inst:
+        return
+
+    if constraint == 'identical':
+        if not all([inst[0] == i for i in inst]):
+            yield ValidationOutcome(inst=inst, expected= constraint, observed = inst, severity=OutcomeSeverity.ERROR)
+
+    if constraint == 'unique':
+        seen = set()
+        duplicates = [x for x in inst if x in seen or seen.add(x)]
+        if duplicates:
+            yield ValidationOutcome(inst=inst, expected= constraint, observed = inst, severity=OutcomeSeverity.ERROR)
 
 
 def recursive_unpack_value(item):
diff --git a/features/steps/validation_handling.py b/features/steps/validation_handling.py
index eb4a9f6c..ee420661 100644
--- a/features/steps/validation_handling.py
+++ b/features/steps/validation_handling.py
@@ -1,5 +1,6 @@
 import functools
 import json
+import re
 from utils import misc
 from functools import wraps
 import ifcopenshell
@@ -148,9 +149,9 @@ def handle_given(context, fn, **kwargs):
         pass # (1) -> context.applicable is set within the function ; replace this with a simple True/False and set applicability here?
     else:
         context._push('attribute') # for attribute stacking
-        if 'at depth 1' in context.step.name:
-            #todo @gh develop a more standardize approach
-            context.instances = list(filter(None, map_given_state(context.instances, fn, context, depth=1, **kwargs)))
+        depth = next(map(int, re.findall('at depth (\d+)$', context.step.name)), 0)
+        if depth:
+            context.instances = list(filter(None, map_given_state(context.instances, fn, context, depth=depth, **kwargs)))
         else:
             context.instances = map_given_state(context.instances, fn, context, **kwargs)
@@ -167,9 +168,8 @@ def is_nested(val):
     def should_apply(values, depth):
         if depth == 0:
             return not is_nested(values)
-        elif depth == 1:
-            return is_nested(values) and all(not is_nested(v) for v in values)
-        return False
+        else:
+            return is_nested(values) and all(should_apply(v, depth-1) for v in values if v is not None)
 
     if should_apply(values, depth):
         return None if values is None else apply_operation(fn, values, context, **kwargs)
@@ -275,9 +275,8 @@ def is_nested(val):
     def should_apply(items, depth):
         if depth == 0:
             return not is_nested(items)
-        elif depth == 1:
-            return is_nested(items) and all(not is_nested(v) for v in items)
-        return False
+        else:
+            return is_nested(items) and all(should_apply(v, depth-1) for v in items if v is not None)
 
     if context.is_global_rule:
         return apply_then_operation(fn, [items], context, current_path=None, **kwargs)
@@ -293,7 +292,8 @@ def should_apply(items, depth):
         # so we take note of the outcomes that already existed. This is necessary since we accumulate
         # outcomes per feature and no longer per scenario.
         num_preexisting_outcomes = len(context.gherkin_outcomes)
-        map_then_state(instances, fn, context, depth = 1 if 'at depth 1' in context.step.name.lower() else 0, **kwargs)
+        depth = next(map(int, re.findall('at depth (\d+)$', context.step.name)), 0)
+        map_then_state(instances, fn, context, depth = depth, **kwargs)
 
         # evokes behave error
         generate_error_message(context, [gherkin_outcome for gherkin_outcome in context.gherkin_outcomes[num_preexisting_outcomes:] if gherkin_outcome.severity in [OutcomeSeverity.WARNING, OutcomeSeverity.ERROR]])
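
For reviewers, a minimal standalone sketch (not part of the patch) of the two mechanisms this change introduces: parsing an arbitrary "at depth N" suffix from the step name, and the now-recursive should_apply check that matches structures nested exactly N levels deep. The parse_depth helper is a hypothetical wrapper around the expression the diff inlines, the sample data is invented, and the raw-string regex is a small cleanup over the diff's non-raw 'at depth (\d+)$'.

# depth_semantics_sketch.py -- illustrative only, under the assumptions above.
import re

def is_nested(val):
    # The diff treats a list/tuple as one level of nesting.
    return isinstance(val, (list, tuple))

def should_apply(values, depth):
    # depth == 0: apply the step to a flat (non-nested) value.
    # depth == N: apply when every non-None element qualifies at depth N-1,
    # which is what lets GRF001 move from 'at depth 1' to 'at depth 2'.
    if depth == 0:
        return not is_nested(values)
    return is_nested(values) and all(should_apply(v, depth - 1) for v in values if v is not None)

def parse_depth(step_name):
    # Same default as the diff: no 'at depth N' suffix means depth 0.
    return next(map(int, re.findall(r'at depth (\d+)$', step_name)), 0)

if __name__ == '__main__':
    assert parse_depth('The values must be unique at depth 1') == 1
    assert parse_depth('The values must be identical at depth 2') == 2
    assert parse_depth('The values must be unique') == 0
    assert should_apply('Body', 0)                   # flat value
    assert should_apply(['Body', 'Axis'], 1)         # nested once
    assert should_apply([['a', 'b'], ['c']], 2)      # nested twice
    assert not should_apply(['Body', ['Axis']], 1)   # mixed nesting does not qualify
    print('all depth-semantics checks pass')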