Skip to content

Commit

Permalink
Merge pull request #300 from buildingSMART/IVS-137-ALB012-IfcAlignmen…
Browse files Browse the repository at this point in the history
…tVerticalSegment.RadiusOfCurvature

IVS-137 ALB012 Vertical Segment Radius of Curvature
  • Loading branch information
civilx64 authored Nov 2, 2024
2 parents c3dd1a7 + afdc400 commit e9f4441
Show file tree
Hide file tree
Showing 16 changed files with 1,843 additions and 75 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# ALB012: RadiusOfCurvature rules for vertical alignment segments.
# Only curved segment types carry a curvature radius; for a parabolic arc the
# radius must equal HorizontalLength / (EndGradient - StartGradient).
@implementer-agreement
@ALB
@version1
@E00020
Feature: ALB012 - Alignment vertical segment radius of curvature
The rule verifies the 'RadiusOfCurvature' design parameter for vertical alignment segments.

Background:
Given A model with Schema "IFC4.3"
Given An IfcAlignmentVertical
Given A relationship IfcRelNests from IfcAlignmentVertical to IfcAlignmentSegment and following that
Given Its attribute DesignParameters
Given Its entity type is 'IfcAlignmentVerticalSegment'

# Non-curved segment types must not state a radius at all.
Scenario: Validating the absence of curvature radius for specific predefined types of vertical segment
Given PredefinedType != 'ARC' or 'PARABOLICARC'
Then The value of attribute RadiusOfCurvature must be empty

# For a parabola, radius of curvature is horizontal length over gradient change.
Scenario: Validating the radius of curvature for parabolic segments
Given PredefinedType = 'PARABOLICARC'
Then The value of attribute RadiusOfCurvature must be equal to the expression: HorizontalLength / ( EndGradient - StartGradient )
39 changes: 24 additions & 15 deletions features/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@
from collections import Counter
import os
from rule_creation_protocol import protocol
from features.exception_logger import ExceptionSummary
import json

from validation_results import ValidationOutcome, ValidationOutcomeCode, OutcomeSeverity
from main import ExecutionMode


model_cache = {}
def read_model(fn):
if cached := model_cache.get(fn):
Expand All @@ -27,7 +27,7 @@ def before_feature(context, feature):
context.validation_task_id = None
Scenario.continue_after_failed_step = False

context.protocol_errors = []
context.protocol_errors, context.caught_exceptions = [], []
if context.config.userdata.get('execution_mode') and eval(context.config.userdata.get('execution_mode')) == ExecutionMode.TESTING:
ifc_filename_incl_path = context.config.userdata.get('input')
convention_attrs = {
Expand All @@ -47,7 +47,7 @@ def before_feature(context, feature):
context.gherkin_outcomes = []

# display the correct scenario and insanity related to the gherkin outcome in the behave console & ci/cd report
context.scenario_outcome_state= {}
context.scenario_outcome_state= []
context.instance_outcome_state = {}


Expand All @@ -63,6 +63,11 @@ def get_validation_outcome_hash(obj):
def after_scenario(context, scenario):
# Given steps may introduce an arbitrary amount of stackframes.
# we need to clean them up before behave starts appending new ones.

if context.failed:
if not 'Behave errors' in context.step.error_message: #exclude behave output from exception logging
context.caught_exceptions.append(ExceptionSummary.from_context(context))

old_outcomes = getattr(context, 'gherkin_outcomes', [])
while context._stack[0].get('@layer') == 'attribute':
context._pop()
Expand Down Expand Up @@ -126,11 +131,8 @@ def get_or_create_instance_when_set(spf_id):

else: # invoked via console or CI/CD pipeline
outcomes = [outcome.to_dict() for outcome in context.gherkin_outcomes]
for idx, outcome in enumerate(outcomes):
sls = find_scenario_for_outcome(context, idx + 1)
outcome['scenario'] = sls['scenario']
outcome['last_step'] = sls['last_step'].name
outcome['instance_id'] = context.instance_outcome_state.get(idx+1, '')
update_outcomes_with_scenario_data(context, outcomes)

outcomes_json_str = json.dumps(outcomes) #ncodes to utf-8
outcomes_bytes = outcomes_json_str.encode("utf-8")
for formatter in filter(lambda f: hasattr(f, "embedding"), context._runner.formatters):
Expand All @@ -140,10 +142,17 @@ def get_or_create_instance_when_set(spf_id):
protocol_errors_bytes = json.dumps(context.protocol_errors).encode("utf-8")
formatter.embedding(mime_type="application/json", data=protocol_errors_bytes, target='feature', attribute_name='protocol_errors')


def find_scenario_for_outcome(context, outcome_index):
    """Return the scenario state entry whose cumulative outcome-count range
    covers the 1-based *outcome_index*, or None when no range matches.

    ``context.scenario_outcome_state`` maps a running outcome count to the
    scenario data recorded when that count was reached; insertion order is
    assumed ascending.
    """
    floor = 0
    for ceiling, scenario_data in context.scenario_outcome_state.items():
        # outcome_index falls in the half-open-from-below range (floor, ceiling]
        if floor < outcome_index <= ceiling:
            return scenario_data
        floor = ceiling
    return None

# embed caught exceptions
caught_exceptions_bytes = json.dumps([exc.to_dict() for exc in context.caught_exceptions]).encode("utf-8")
formatter.embedding(mime_type="application/json", data=caught_exceptions_bytes, target='feature', attribute_name='caught_exceptions')


def update_outcomes_with_scenario_data(context, outcomes):
    """Annotate each serialized outcome dict in *outcomes* with the scenario
    name, last executed step name, and instance id recorded in
    ``context.scenario_outcome_state`` (a list of ``(index, data)`` pairs).

    Outcomes with no matching state entry are left untouched.
    """
    for position, outcome in enumerate(outcomes):
        # Find the first state entry recorded for this outcome position.
        scenario_state = None
        for state_index, state_data in context.scenario_outcome_state:
            if state_index == position:
                scenario_state = state_data
                break
        if scenario_state is None:
            continue
        outcome['scenario'] = scenario_state['scenario']
        outcome['last_step'] = scenario_state['last_step'].name
        outcome['instance_id'] = scenario_state.get('instance_id')
51 changes: 51 additions & 0 deletions features/exception_logger.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import os
from dataclasses import dataclass, asdict
import json

@dataclass
class ExceptionSummary:
    """
    Custom exception summary for internal errors during feature validation.

    The summary is forwarded to the JSON output and later evaluated after the
    Behave run using pytest. Errors occurring at any stage of the step
    implementation or while running the custom decorator are captured here.
    """
    feature: str     # name of the behave feature being validated
    step: str        # name of the step that raised
    error_type: str  # exception class name, e.g. 'KeyError'
    location: str    # condensed traceback, e.g. "steps.py(l#12, #34), env.py(l#7)"

    @staticmethod
    def extract_traceback_summary(exc_traceback):
        """Condense a traceback object into "file(l#line, ...)" per file.

        Frames are collected in call order, de-duplicated per (file, line),
        then rendered deepest-file-first.
        """
        # Maps basename -> ordered list of distinct line numbers hit in that file.
        trace = {}

        current_tb = exc_traceback
        while current_tb is not None:
            filename = os.path.basename(current_tb.tb_frame.f_code.co_filename)
            line_number = current_tb.tb_lineno

            if filename not in trace:
                trace[filename] = [line_number]
            elif line_number not in trace[filename]:
                trace[filename].append(line_number)

            current_tb = current_tb.tb_next

        trace_list = []
        # reversed() walks insertion order backwards: deepest file first.
        for filename in reversed(trace):
            line_numbers = ", ".join(f"#{ln}" for ln in trace[filename])
            # Bug fix: the original emitted a literal "(unknown)" placeholder and
            # never used the filename it computed above.
            trace_list.append(f"{filename}(l{line_numbers})")

        return ", ".join(trace_list)

    @classmethod
    def from_context(cls, context):
        """Build a summary from a behave context whose current step failed."""
        feature_name = context.feature.name
        step_name = context.step.name
        error_type = str(context.step.exception.__class__.__name__)
        location = cls.extract_traceback_summary(context.step.exc_traceback)

        return cls(feature=feature_name, step=step_name, error_type=error_type, location=location)

    def to_dict(self):
        """Return the summary as a plain dict for JSON serialization."""
        return asdict(self)
4 changes: 2 additions & 2 deletions features/rule_creation_protocol/protocol.py
Original file line number Diff line number Diff line change
Expand Up @@ -283,11 +283,11 @@ def validate_test_filename(cls, value):



"""Check if test file start with pass or fail"""
"""Check if test file start with a valid prefix"""
if result not in ('pass', 'fail', 'na'):
raise ProtocolError(
value=value,
message=f"Name of the result file must start with 'pass', 'fail' or 'na'. In that case name starts with: {result}"
message=f"Name of the test file must start with 'pass', 'fail', or 'na'. This file name starts with: {result}"
)

"""Check if a second part of the test file is a rule code"""
Expand Down
7 changes: 7 additions & 0 deletions features/steps/givens/attributes.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,3 +229,10 @@ def step_impl(context, inst, ff : FirstOrFinal):
@gherkin_ifc.step("An IFC model")
def step_impl(context):
    # Trivially passing given: activates the model itself as the instance
    # under test so subsequent steps can operate on it.
    yield ValidationOutcome(instance_id = context.model, severity=OutcomeSeverity.PASSED)

@gherkin_ifc.step('Each instance pair at depth 1')
def step_impl(context, inst):
    # Build consecutive [inst[i], inst[i+1]] pairs; a sequence shorter than
    # two elements yields an empty pair list.
    consecutive_pairs = [[left, right] for left, right in zip(inst, inst[1:])]
    yield ValidationOutcome(instance_id = [consecutive_pairs], severity=OutcomeSeverity.PASSED)
153 changes: 120 additions & 33 deletions features/steps/thens/attributes.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import operator

import ifcopenshell

from utils import misc, system, geometry
from validation_handling import gherkin_ifc
Expand Down Expand Up @@ -60,37 +60,124 @@ def accumulate_errors(i):
yield from errors


@gherkin_ifc.step('The value of attribute {attribute} must be {value}')
@gherkin_ifc.step('The value of attribute {attribute} must be {value} {display_entity:display_entity}')
def step_impl(context, inst, attribute, value, display_entity=0):
# @todo the horror and inconsistency.. should we use
# ast here as well to differentiate between types?
pred = operator.eq
if value == 'empty':
value = ()
elif value == 'not empty':
value = ()
pred = operator.ne
elif 'or' in value:
opts = value.split(' or ')
value = tuple(opts)
pred = misc.reverse_operands(operator.contains)

if isinstance(inst, (tuple, list)):
inst = inst[0]
attribute_value = getattr(inst, attribute, 'Attribute not found')
if attribute_value is None:
attribute_value = ()
if inst is None:
# nothing was activated by the Given criteria
yield ValidationOutcome(inst=inst, severity=OutcomeSeverity.EXECUTED)
elif not pred(attribute_value, value):
yield ValidationOutcome(
inst=inst,
expected=None if not value else value,
observed=misc.recursive_unpack_value(attribute_value),
severity=OutcomeSeverity.ERROR
)
@gherkin_ifc.step('The value of attribute {attribute} must be {value_or_comparison_op}')
@gherkin_ifc.step('The value of attribute {attribute} must be {value_or_comparison_op} {display_entity:display_entity}')
@gherkin_ifc.step('The value of attribute {attribute} must be {value_or_comparison_op} the expression: {expression}')
def step_impl(context, inst, attribute:str, value_or_comparison_op:str, expression:str=None, display_entity=0):
    """
    Compare an attribute either to a literal value or to an arithmetic
    expression built from the instance's other attributes.

    With an {expression}, {value_or_comparison_op} must be one of: 'equal to',
    'not equal to', 'greater than', 'less than', 'greater than or equal to',
    'less than or equal to'. The {expression} is whitespace-tokenized and may
    use attribute names, parentheses, and the operators:
        +  : addition;
        -  : subtraction;
        *  : multiplication;
        /  : division;
        %  : modulus;
        ** : exponentiation.
    Without an expression, {value_or_comparison_op} is the literal
    expectation: 'empty', 'not empty', a plain value, or alternatives joined
    with ' or '.
    """

    operators = {
        '+' : operator.add,
        '-' : operator.sub,
        '*' : operator.mul,
        '/' : operator.truediv,
        '%' : operator.mod,
        '**' : operator.pow,
        'equal to' : operator.eq,
        'not equal to' : operator.ne,
        'greater than' : operator.gt,
        'less than' : operator.lt,  # bug fix: previously mapped to operator.gt
        'greater than or equal to' : operator.ge,
        'less than or equal to' : operator.le,
    }

    if expression is not None:
        # Get compared attribute value
        attr_compared_value = getattr(inst, attribute, 'Compared attribute not found')
        if isinstance(attr_compared_value, ifcopenshell.entity_instance):
            raise Exception('Compared attribute value is an IFC entity')

        # Rebuild the expression token by token, substituting attribute names
        # with their values. Rebuilding (instead of str.replace on the whole
        # string) prevents an attribute name that is a substring of another
        # token from corrupting the expression.
        resolved_tokens = []
        for string_content in expression.split():
            # Operators and parentheses pass through unchanged
            if string_content in [*operators, '(', ')']:
                resolved_tokens.append(string_content)
            elif hasattr(inst, string_content):
                token_value = getattr(inst, string_content)
                if isinstance(token_value, ifcopenshell.entity_instance):
                    raise Exception('Expression attribute value is an IFC entity')
                resolved_tokens.append(str(token_value))
            else:
                raise Exception('Expression attribute not found')
        expression = ' '.join(resolved_tokens)

        # Evaluate the now purely-numeric expression.
        # NOTE(review): eval is tolerable only because the expression comes
        # from the rule's own feature file (trusted input), never from the
        # model under validation.
        try:
            expression_value = eval(expression)
        except Exception as e:
            raise ValueError(f"Error evaluating expression: {e}")

        # Compare the attribute with the expression value, considering the
        # precision declared by the model's geometric representation contexts.
        entity_contexts = geometry.recurrently_get_entity_attr(context, inst, 'IfcRepresentation', 'ContextOfItems')
        precision = geometry.get_precision_from_contexts(entity_contexts)

        try:
            result = geometry.compare_with_precision(
                attr_compared_value, expression_value, precision, value_or_comparison_op
            )
            if result:
                yield ValidationOutcome(
                    inst=inst,
                    expected=f"A value {value_or_comparison_op} {expression_value} with precision {precision}",
                    # bug fix: was {attr_compared_value}, which built a one-element
                    # set -- not JSON-serializable, inconsistent with other outcomes
                    observed=attr_compared_value,
                    severity=OutcomeSeverity.PASSED,
                )
            else:
                yield ValidationOutcome(
                    inst=inst,
                    expected=f"A value {value_or_comparison_op} {expression_value}",
                    observed=attr_compared_value,  # bug fix: was a set literal
                    severity=OutcomeSeverity.ERROR,
                )
        except ValueError as e:
            yield ValidationOutcome(
                inst=inst,
                expected=f"A value {value_or_comparison_op} {expression_value}",
                observed=f"Error during comparison: {e}",
                severity=OutcomeSeverity.ERROR,
            )

    else:
        # @todo the horror and inconsistency.. should we use
        # ast here as well to differentiate between types?
        pred = operator.eq
        if value_or_comparison_op == 'empty':
            value_or_comparison_op = ()
        elif value_or_comparison_op == 'not empty':
            value_or_comparison_op = ()
            pred = operator.ne
        elif 'or' in value_or_comparison_op:
            # 'A or B' alternatives: membership test with operands reversed
            opts = value_or_comparison_op.split(' or ')
            value_or_comparison_op = tuple(opts)
            pred = misc.reverse_operands(operator.contains)

        if isinstance(inst, (tuple, list)):
            inst = inst[0]
        attribute_value = getattr(inst, attribute, 'Attribute not found')
        if attribute_value is None:
            # normalize unset attribute to () so 'empty'/'not empty' compare cleanly
            attribute_value = ()
        if inst is None:
            # nothing was activated by the Given criteria
            yield ValidationOutcome(inst=inst, severity=OutcomeSeverity.EXECUTED)
        elif not pred(attribute_value, value_or_comparison_op):
            yield ValidationOutcome(
                inst=inst,
                expected=None if not value_or_comparison_op else value_or_comparison_op,
                observed=misc.recursive_unpack_value(attribute_value),
                severity=OutcomeSeverity.ERROR
            )

@gherkin_ifc.step('The {field} of the {file_or_model} must be "{values}"')
def step_impl(context, inst, field, file_or_model, values):
Expand Down Expand Up @@ -134,7 +221,7 @@ def step_impl(context, inst, constraint, num):
inst = str(inst)
op = misc.stmt_to_op(constraint)
if not op(len(inst), num):
yield ValidationOutcome(inst=inst, expected={'length':num, 'expected_or_observed':'expected'}, observed={'length': len(inst), 'expected_or_observed':'observed', 'inst':inst}, severity=OutcomeSeverity.ERROR)
yield ValidationOutcome(inst=inst, expected={'length':num, 'expected_or_observed':'expected'}, observed={'length': len(inst), 'expected_or_observed':'observed', 'inst':inst}, severity=OutcomeSeverity.ERROR)


@gherkin_ifc.step('The characters must be within the official encoding character set')
Expand Down
Loading

0 comments on commit e9f4441

Please sign in to comment.