check-autoparallel.py (forked from preshing/analyze-spec-benchmarks)

#!/usr/bin/env python3
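"""Report which individual benchmarks contribute most to auto-parallelized SPEC results.

For the INT and FP suites, reads benchmarks.txt and summaries.txt and prints
each benchmark's largest multiple of the geometric average of base scores
across the auto-parallelized test runs, largest first.
"""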
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import open
from future import standard_library
standard_library.install_aliases()
import collections
import csv
import math


def iterCsvRecords(path, className):
    with open(path, 'r') as f:
        reader = csv.reader(f)
        clazz = None
        for row in reader:
            if clazz is None:
                clazz = collections.namedtuple(className, row)
            else:
                yield clazz(*row)
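

# Both input files are plain CSV with a header row: iterCsvRecords uses the
# header as the namedtuple field names, so every later row becomes one record.
# The attributes referenced below imply at least these fields (an assumption;
# the real files may carry more columns, possibly in a different order):
#
#   benchmarks.txt: testID, benchName, base, ...
#   summaries.txt:  testID, benchType, autoParallel, machine, ...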
# Index benchmark records by test ID, then by benchmark name.
benchTable = collections.defaultdict(dict)
for brec in iterCsvRecords('benchmarks.txt', 'BenchmarkRecord'):
    benchTable[brec.testID][brec.benchName] = brec


def geomAverage(values):
    averageExp = sum([math.log(x) for x in values]) / len(values)
    return math.exp(averageExp)
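
# Worked example: geomAverage([1.0, 4.0]) averages the logs, (0 + ln 4) / 2 = ln 2,
# then exponentiates, giving 2.0 rather than the arithmetic mean of 2.5.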


for benchType in ['INT', 'FP']:
    print('Top contributing benchmarks to %s results, by maximum multiple of the geometric average:' % benchType)
    topBenchResults = {}
    summaryTable = {}
    for srec in iterCsvRecords('summaries.txt', 'SummaryRecord'):
        summaryTable[srec.testID] = srec
        if srec.autoParallel == 'Yes' and benchType in srec.benchType:
            # Normalize every benchmark's base score in this test run by the
            # run's geometric average, then keep the highest multiple seen for
            # each benchmark name across all runs.
            base = geomAverage([float(brec.base) for brec in list(benchTable[srec.testID].values())])
            for brec in list(benchTable[srec.testID].values()):
                r = (float(brec.base) / base, brec)
                topBenchResults[brec.benchName] = max(r, topBenchResults.get(brec.benchName, (0, None)))
    for v, k in sorted([(v, k) for k, v in list(topBenchResults.items())], reverse=True):
        benchValue, brec = v
        print(benchValue, k, brec.testID, summaryTable[brec.testID].machine)
    print()
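
# Typical invocation, assuming benchmarks.txt and summaries.txt sit in the
# working directory (the paths above are relative):
#
#   python3 check-autoparallel.py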