# -*- coding: UTF-8 -*-
"""prettytable_result_parser.py: parse results.json files produced by the
TechEmpower FrameworkBenchmarks suite and print the selected metrics as one
PrettyTable per workload type."""
import json as js
import argparse

from prettytable import PrettyTable

# Workload types found in the "rawData" section of a results.json file.
workload_lists = ["fortune", "plaintext", "db", "update", "json", "query"]

# result_dict[workload][test_name + '-' + file_name] is the list of per-level
# results of one test, e.g.:
# result_dict['update']['netty-20220409082258/results.json'] =
#     [{'latencyAvg': '90.89us', 'latencyMax': '9.24ms'}, ...]
result_dict = {}

# Axis values read from the parsed files: concurrency levels, pipeline
# concurrency levels (plaintext) and query intervals (query).
connection_level = []
pipeline = []
queryIntervals = []


def parse_argument():
    parser = argparse.ArgumentParser(
        description="Analyse JSON results from FrameworkBenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '--files',
        default=None,
        nargs='+',
        help='results.json files to compare'
    )
    parser.add_argument(
        '--datas',
        default=['latencyAvg'],
        nargs='+',
        help='metrics to print, e.g. latencyAvg latencyMax'
    )
    args = parser.parse_args()
    return args
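
# Example invocation (the result file paths below are only illustrative):
#   python prettytable_result_parser.py \
#       --files 20220409082258/results.json 20220410093000/results.json \
#       --datas latencyAvg latencyMax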


def read_files(args):
    global connection_level
    global queryIntervals
    global pipeline
    for f in args.files:
        with open(f) as result:
            json_obj = js.load(result)
        raw_data = json_obj["rawData"]
        # The axis values are taken from each file; they are assumed to be the
        # same across all files being compared.
        connection_level = json_obj["concurrencyLevels"]
        queryIntervals = json_obj["queryIntervals"]
        pipeline = json_obj["pipelineConcurrencyLevels"]
        for workload in workload_lists:
            if raw_data[workload]:
                for test_name, test_result in raw_data[workload].items():
                    update_result(test_name, f, workload, test_result)


def update_result(test_name, file_name, workload, test_result):
    # Key each result by "<test name>-<source file>" so the same framework from
    # different result files can be shown side by side.
    name = test_name + '-' + file_name
    if workload not in result_dict:
        result_dict[workload] = {}
    if name not in result_dict[workload]:
        result_dict[workload][name] = test_result


def print_table(args):
    # One table per (metric, workload) pair: the first column is the workload's
    # axis (concurrency level, pipeline level or query interval), followed by
    # one column per parsed test/file.
    for interested_data in args.datas:
        for k, workload_results in result_dict.items():
            pt = PrettyTable()
            pt.title = "Type: " + k + ", Result: " + interested_data
            pt.field_names = [map_workload_to_field(k)] + list(workload_results.keys())
            for i in range(len(workload_results[next(iter(workload_results))])):
                data_row = [v[i].get(interested_data) for v in workload_results.values()]
                pt.add_row([map_workload_to_value(k)[i]] + data_row)
            print(pt)


def map_workload_to_field(workload):
    if workload == 'query':
        return 'queryIntervals'
    elif workload == 'plaintext':
        return 'pipeline'
    else:
        return 'concurrencyLevel'


def map_workload_to_value(workload):
    if workload == 'query':
        return queryIntervals
    elif workload == 'plaintext':
        return pipeline
    else:
        return connection_level


def toms(s):
    """Convert a latency string such as '90.89us', '9.24ms' or '1.5s' into
    milliseconds, returned as a string."""
    if s is None:
        return '0'
    elif s.find('ms') != -1:
        return s.replace('ms', '')
    elif s.find('us') != -1:
        return str(float(s.replace('us', '')) / 1000)
    elif s.find('s') != -1:
        return str(float(s.replace('s', '')) * 1000)
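
# A few conversions produced by the helper above, derived from its arithmetic:
#   toms('90.89us') -> '0.09089'
#   toms('9.24ms')  -> '9.24'
#   toms('1.5s')    -> '1500.0'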


if __name__ == "__main__":
    args = parse_argument()
    read_files(args)
    print_table(args)