evaluator.py
#!/usr/bin/env python3

import argparse
import logging
import pathlib
import sys

from pathlib import Path
from typing import Optional

from ktoolbox import common

import evalConfig
import tftbase

from tftbase import FlowTestOutput
from tftbase import TftResult
from tftbase import TftResults


logger = logging.getLogger("tft." + __name__)


class Evaluator:
    eval_config: evalConfig.Config

    def __init__(self, config: Optional[evalConfig.Config | str | pathlib.Path]):
        # Accept either an already-parsed evalConfig.Config or the path to a
        # YAML file (including "" for an entirely empty configuration).
        if not isinstance(config, evalConfig.Config):
            config = evalConfig.Config.parse_from_file(config)
        self.eval_config = config

    def eval_flow_test_output(self, flow_test: FlowTestOutput) -> FlowTestOutput:
        # Look up the configured thresholds for this test type, test case and
        # direction.
        item = self.eval_config.get_item(
            test_type=flow_test.tft_metadata.test_type,
            test_case_id=flow_test.tft_metadata.test_case_id,
            is_reverse=flow_test.tft_metadata.reverse,
        )

        bitrate_threshold_rx: Optional[float] = None
        bitrate_threshold_tx: Optional[float] = None
        if item is not None:
            bitrate_threshold_rx = item.get_threshold(rx=True)
            bitrate_threshold_tx = item.get_threshold(tx=True)

        # A failed run takes precedence; otherwise, check the RX threshold,
        # then the TX threshold.
        success = True
        msg: Optional[str] = None
        if not flow_test.success:
            success = False
            if flow_test.msg is not None:
                msg = f"Run failed: {flow_test.msg}"
            else:
                msg = "Run failed for unspecified reason"
        elif not flow_test.bitrate_gbps.is_passing(bitrate_threshold_rx, rx=True):
            success = False
            msg = f"Run succeeded but {flow_test.bitrate_gbps} is below RX threshold {bitrate_threshold_rx}"
        elif not flow_test.bitrate_gbps.is_passing(bitrate_threshold_tx, tx=True):
            success = False
            msg = f"Run succeeded but {flow_test.bitrate_gbps} is below TX threshold {bitrate_threshold_tx}"

        return flow_test.clone(
            eval_result=tftbase.EvalResult(
                success=success,
                msg=msg,
                bitrate_threshold_rx=bitrate_threshold_rx,
                bitrate_threshold_tx=bitrate_threshold_tx,
            ),
        )
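
    # Note on missing thresholds: entries absent from the eval config leave the
    # threshold None. Per the CLI help below, evaluation passes when thresholds
    # are missing, so is_passing() presumably treats None as "no threshold".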

    def eval_test_result(self, tft_result: TftResult) -> TftResult:
        new_flow_test = self.eval_flow_test_output(tft_result.flow_test)
        new_plugins = [
            plugin_output.plugin.eval_plugin_output(
                tft_result.flow_test.tft_metadata,
                plugin_output,
            )
            for plugin_output in tft_result.plugins
        ]
        return TftResult(
            flow_test=new_flow_test,
            plugins=tuple(new_plugins),
        )

    def eval(
        self,
        tft_results: TftResults,
    ) -> TftResults:
        lst = [self.eval_test_result(tft_result) for tft_result in tft_results]
        return TftResults(lst=tuple(lst))

    def eval_from_file(
        self,
        filename: str | Path,
    ) -> TftResults:
        return self.eval(
            TftResults.parse_from_file(filename),
        )
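
# Example usage as a library (a minimal sketch; "eval-config.yaml" and the
# result/output paths below are placeholders, not files shipped with this
# script):
#
#   evaluator = Evaluator("eval-config.yaml")
#   results = evaluator.eval_from_file("ft-logs/result.json")
#   results.serialize_to_file("evaluated.json")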

def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description="Tool to evaluate TFT Flow test results"
    )
    parser.add_argument(
        "config",
        metavar="config",
        type=str,
        help='YAML configuration file with tft test thresholds. See "eval-config.yaml". '
        "The configuration may also contain only a subset of the relevant configurations. "
        "Evaluation will successfully pass if thresholds are missing. "
        "The entire configuration can also be empty (either an empty "
        "YAML file or only '{}'), or the filename can be '' to indicate a completely "
        "empty configuration.",
    )
    parser.add_argument(
        "logs",
        type=str,
        help='Result file from a traffic flow test run. The "tft.py" tool by default writes this as file '
        '"./ft-logs/$TIMESTAMP.json". Note that the test itself already performs an evaluation with '
        "the provided eval config YAML (which can be empty or omitted). The input format is the same as the "
        "output format, which is also what the test produces.",
    )
    parser.add_argument(
        "output",
        type=str,
        help="Output file to write evaluation results to. This is the same format as the input argument "
        "'logs'. You can pass the output to evaluator.py again to update the evaluation.",
    )
    common.log_argparse_add_argument_verbose(parser)

    args = parser.parse_args()

    common.log_config_logger(args.verbose, "tft", "ktoolbox")

    if args.config and not Path(args.config).exists():
        logger.error(f"No config file found at {args.config}, exiting")
        sys.exit(-1)

    if not args.logs or not Path(args.logs).exists():
        logger.error(f"Log file {args.logs} does not exist")
        sys.exit(-1)

    return args
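
# Example invocation (a sketch with hypothetical file names; the logs file must
# exist, and the config may be "" for an entirely empty configuration):
#
#   ./evaluator.py eval-config.yaml ft-logs/result.json evaluated.json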

def main() -> None:
    args = parse_args()

    evaluator = Evaluator(args.config)
    tft_results = evaluator.eval_from_file(args.logs)
    tft_results.serialize_to_file(args.output)
    tft_results.get_pass_fail_status().log()


if __name__ == "__main__":
    main()