# plot.py
import json
import numpy as np
import matplotlib.pyplot as plt
from typing import Dict, List, Tuple
from matplotlib import rc
from pathlib import Path
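
# A sketch of the input records this script assumes, inferred from the keys read
# below ('experiment', 'timestamp', 'quorum_reached'); the values shown are
# illustrative only, and the real files may carry extra fields.
#
#   experiment-write.json : [{"experiment": 16, "timestamp": 1000.0}, ...]
#   experiments.json      : [{"experiment": 16, "quorum_reached": 1076.5}, ...]
#
# The latency for an experiment is quorum_reached minus the matching write timestamp.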

def load_experiment_data(write_file: str, read_file: str) -> List[Tuple[int, float]]:
    """Pair write timestamps with quorum-read timestamps and return per-experiment latencies."""
    with open(write_file, 'r') as f:
        write_data = json.load(f)
    write_timestamps = {int(exp['experiment']): exp['timestamp'] for exp in write_data}

    with open(read_file, 'r') as f:
        read_data = json.load(f)
    read_timestamps = {int(exp['experiment']): exp['quorum_reached'] for exp in read_data}

    latencies = []
    for exp_num in sorted(write_timestamps.keys()):
        if exp_num in read_timestamps:
            latency = read_timestamps[exp_num] - write_timestamps[exp_num]
            latencies.append((exp_num, latency))
    return latencies

def load_all_experiments(base_dir: str) -> Dict[int, List[float]]:
    """Collect latencies from every run directory under base_dir, keyed by experiment number."""
    all_latencies: Dict[int, List[float]] = {}
    # Only run directory 1 is loaded at the moment; widen the range to average over more runs.
    for i in range(1, 2):
        exp_dir = Path(f'{base_dir}/{i}')
        write_file = exp_dir / 'experiment-write.json'
        read_file = exp_dir / 'experiments.json'
        try:
            latencies = load_experiment_data(str(write_file), str(read_file))
            for exp_num, latency in latencies:
                # Skip experiments numbered 15 or below; the plot's x-axis starts at 15.
                if exp_num <= 15:
                    continue
                all_latencies.setdefault(exp_num, []).append(latency)
        except FileNotFoundError as e:
            print(f"Warning: Could not load files from directory {i}: {e}")
            continue
    return all_latencies
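
# Expected on-disk layout, inferred from load_all_experiments above (the base
# directory names are the ones main() passes in; the single run directory '1'
# matches range(1, 2)):
#
#   experiment-data-23/1/experiment-write.json
#   experiment-data-23/1/experiments.json
#   experiment-data-45/1/experiment-write.json
#   experiment-data-45/1/experiments.json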

def plot_averaged_latencies(all_latencies_23: Dict[int, List[float]], all_latencies_45: Dict[int, List[float]]):
    """Plot mean latency per experiment for both configurations, with shaded bands of mean +/- 1.96*std."""
    rc('text', usetex=True)
    rc(
        'font',
        family='serif',
        serif=['Computer Modern Roman'],
        monospace=['Computer Modern Typewriter'],
        size=12
    )

    x_23 = sorted(all_latencies_23.keys())
    y_avg_23 = [np.mean(all_latencies_23[exp_num]) for exp_num in x_23]
    y_std_23 = [np.std(all_latencies_23[exp_num]) for exp_num in x_23]

    x_45 = sorted(all_latencies_45.keys())
    y_avg_45 = [np.mean(all_latencies_45[exp_num]) for exp_num in x_45]
    y_std_45 = [np.std(all_latencies_45[exp_num]) for exp_num in x_45]

    plt.figure(figsize=(10, 6))

    plt.plot(x_23, y_avg_23, 'b-', label=r'$\beta = 0, \gamma = \lfloor\frac{1}{3}n\rfloor$')
    print(f"Last: {y_avg_23[-1]:.2f} ms")
    # Shade mean +/- 1.96*std around each point.
    plt.fill_between(x_23,
                     [avg - 1.96*std for avg, std in zip(y_avg_23, y_std_23)],
                     [avg + 1.96*std for avg, std in zip(y_avg_23, y_std_23)],
                     color='b', alpha=0.1)

    plt.plot(x_45, y_avg_45, 'C1-', label=r'$\beta = \lfloor\frac{1}{5}n\rfloor, \gamma = 0$')
    print(f"Last: {y_avg_45[-1]:.2f} ms")
    plt.fill_between(x_45,
                     [avg - 1.96*std for avg, std in zip(y_avg_45, y_std_45)],
                     [avg + 1.96*std for avg, std in zip(y_avg_45, y_std_45)],
                     color='orange', alpha=0.2)

    custom_ticks = [15] + list(np.arange(100, 1001, 100))
    plt.xticks(custom_ticks)

    # Scale the y-axis to cover the widest shaded band, with a floor of 200 ms.
    y_max = max(200, max(
        max(y + 1.96*std for y, std in zip(y_avg_23, y_std_23)),
        max(y + 1.96*std for y, std in zip(y_avg_45, y_std_45))
    ))
    plt.xlim(15, 1000)
    plt.ylim(0, y_max)

    plt.axhline(y=76.5, color='r', linestyle='--', label='RTT')
    plt.xlabel(r'Number of replicas ($n$)')
    plt.ylabel(r'Latency (ms)')
    plt.title(r'End-to-end confirmation latency vs number of replicas')
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.legend()
    plt.savefig("plot.pdf", bbox_inches="tight")

def main():
    all_latencies_23 = load_all_experiments('experiment-data-23')
    all_latencies_45 = load_all_experiments('experiment-data-45')
    plot_averaged_latencies(all_latencies_23, all_latencies_45)

    print("\nStatistics for beta = 2/3:")
    if all_latencies_23:
        all_values = [lat for lats in all_latencies_23.values() for lat in lats]
        avg_latency = np.mean(all_values)
        std_latency = np.std(all_values)
        print(f"Overall average latency: {avg_latency:.2f} ± {std_latency:.2f} ms")
        print(f"Number of experiments: {len(all_latencies_23)}")
        print(f"Number of runs per experiment: {len(next(iter(all_latencies_23.values())))}")

    print("\nStatistics for beta = 4/5:")
    if all_latencies_45:
        all_values = [lat for lats in all_latencies_45.values() for lat in lats]
        avg_latency = np.mean(all_values)
        std_latency = np.std(all_values)
        print(f"Overall average latency: {avg_latency:.2f} ± {std_latency:.2f} ms")
        print(f"Number of experiments: {len(all_latencies_45)}")
        print(f"Number of runs per experiment: {len(next(iter(all_latencies_45.values())))}")


if __name__ == "__main__":
    main()
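
# Running the script (assuming the layout sketched above) writes the figure to
# plot.pdf and prints the summary statistics:
#
#   python plot.py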