Skip to content

Commit e6164a8

Browse files
Allow running the benchmarks the old way.
1 parent 1d82993 commit e6164a8

27 files changed

+482
-405
lines changed

benchmarks/.libs/legacyutils.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
import sys
2+
3+
4+
def maybe_handle_legacy(bench_func, *args, loopsarg='loops', legacyarg=None):
5+
if '--legacy' not in sys.argv:
6+
return
7+
argv = list(sys.argv[1:])
8+
argv.remove('--legacy')
9+
10+
kwargs = {}
11+
if legacyarg:
12+
kwargs[legacyarg] = True
13+
if argv:
14+
assert loopsarg
15+
kwargs[loopsarg] = int(argv[0])
16+
17+
_, times = bench_func(*args, **kwargs)
18+
if len(argv) > 1:
19+
json.dump(times, open(argv[1], 'w'))
20+
21+
sys.exit(0)

benchmarks/bm_aiohttp/legacyutils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../.libs/legacyutils.py

benchmarks/bm_aiohttp/run_benchmark.py

Lines changed: 30 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,15 @@
1313
ARGV = [sys.executable, "serve.py"]
1414

1515

16+
#############################
17+
# benchmarks
18+
1619
def bench_aiohttp_requests(loops=3000):
20+
elapsed, _ = _bench_aiohttp_requests(loops)
21+
return elapsed
22+
23+
24+
def _bench_aiohttp_requests(loops=3000, legacy=False):
1725
"""Measure N HTTP requests to a local server.
1826
1927
Note that the server is freshly started here.
@@ -27,17 +35,36 @@ def bench_aiohttp_requests(loops=3000):
2735
Hence this should be used with bench_time_func()
2836
instead of bench_func().
2937
"""
38+
start = pyperf.perf_counter()
3039
elapsed = 0
40+
times = []
3141
with netutils.serving(ARGV, DATADIR, "127.0.0.1:8080"):
3242
requests_get = requests.get
33-
for _ in range(loops):
43+
for i in range(loops):
44+
# This is a macro benchmark for a Python implementation
45+
# so "elapsed" covers more than just how long a request takes.
3446
t0 = pyperf.perf_counter()
3547
requests_get("http://localhost:8080/blog/").text
36-
elapsed += pyperf.perf_counter() - t0
37-
return elapsed
48+
t1 = pyperf.perf_counter()
49+
50+
elapsed += t1 - t0
51+
times.append(t0)
52+
if legacy and (i % 100 == 0):
53+
print(i, t0 - start)
54+
times.append(pyperf.perf_counter())
55+
if legacy:
56+
total = times[-1] - start
57+
print("%.2fs (%.3freq/s)" % (total, loops / total))
58+
return elapsed, times
3859

3960

61+
#############################
62+
# the script
63+
4064
if __name__ == "__main__":
65+
from legacyutils import maybe_handle_legacy
66+
maybe_handle_legacy(_bench_aiohttp_requests, legacyarg='legacy')
67+
4168
runner = pyperf.Runner()
4269
runner.metadata['description'] = "Test the performance of aiohttp"
4370
runner.bench_time_func("aiohttp", bench_aiohttp_requests)
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../.libs/legacyutils.py

benchmarks/bm_djangocms/run_benchmark.py

Lines changed: 49 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,11 @@ def _ensure_datadir(datadir, preserve=True):
117117
# benchmarks
118118

119119
def bench_djangocms_requests(sitedir, loops=INNER_LOOPS):
120+
elapsed, _ = _bench_djangocms_requests(loops)
121+
return elapsed
122+
123+
124+
def _bench_djangocms_requests(sitedir, loops=INNER_LOOPS, legacy=False):
120125
"""Measure N HTTP requests to a local server.
121126
122127
Note that the server is freshly started here.
@@ -130,14 +135,26 @@ def bench_djangocms_requests(sitedir, loops=INNER_LOOPS):
130135
Hence this should be used with bench_time_func()
131136
instead of bench_func().
132137
"""
138+
start = pyperf.perf_counter()
133139
elapsed = 0
140+
times = []
134141
with netutils.serving(ARGV_SERVE, sitedir, "127.0.0.1:8000"):
135-
requests_get = requests.get
136-
for _ in range(loops):
142+
for i in range(loops):
143+
# This is a macro benchmark for a Python implementation
144+
# so "elapsed" covers more than just how long a request takes.
137145
t0 = pyperf.perf_counter()
138-
requests_get("http://localhost:8000/").text
139-
elapsed += pyperf.perf_counter() - t0
140-
return elapsed
146+
requests.get("http://localhost:8000/").text
147+
t1 = pyperf.perf_counter()
148+
149+
elapsed += t1 - t0
150+
times.append(t0)
151+
if legacy and (i % 100 == 0):
152+
print(i, t0 - start)
153+
times.append(pyperf.perf_counter())
154+
if legacy:
155+
total = times[-1] - start
156+
print("%.2fs (%.3freq/s)" % (total, loops / total))
157+
return elapsed, times
141158

142159

143160
# We can't set "add_cmdline_args" on pyperf.Runner
@@ -157,48 +174,48 @@ def add_worker_args(cmd, _):
157174
)
158175

159176

177+
#############################
178+
# the script
179+
160180
if __name__ == "__main__":
161181
"""
162182
Usage:
163-
python djangocms.py
164-
python djangocms.py --setup DATADIR
165-
python djangocms.py --serve DATADIR
183+
python benchmarks/bm_djangocms/run_benchmark.py
184+
python benchmarks/bm_djangocms/run_benchmark.py --setup DIR
185+
python benchmarks/bm_djangocms/run_benchmark.py --serve DIR
166186
167187
The first form creates a temporary directory, sets up djangocms in it,
168188
serves out of it, and removes the directory.
169189
The second form sets up a djangocms installation in the given directory.
170-
The third form runs a benchmark out of an already-set-up directory
190+
The third form runs the benchmark out of an already-set-up directory
171191
The second and third forms are useful if you want to benchmark the
172192
initial migration phase separately from the second serving phase.
173193
"""
174194
runner = _Runner()
175195
runner.metadata['description'] = "Test the performance of a Django data migration"
176196

177197
# Parse the CLI args.
178-
runner.argparser.add_argument("--setup", action="store_const", const=True)
198+
runner.argparser.add_argument("--legacy", action='store_true')
179199
group = runner.argparser.add_mutually_exclusive_group()
200+
group.add_argument("--setup")
180201
group.add_argument("--serve")
181-
group.add_argument("datadir", nargs="?")
182202
args = runner.argparser.parse_args()
183203

184-
if args.serve is not None:
204+
if args.setup is not None:
205+
args.datadir = args.setup
206+
args.setup = True
207+
args.serve = False
208+
elif args.serve is not None:
185209
args.datadir = args.serve
210+
args.setup = False
186211
args.serve = True
187-
if not args.setup:
188-
args.setup = False
189-
if not args.datadir:
190-
runner.argparser.error("missing datadir")
191-
elif not os.path.exists(args.datadir):
192-
cmd = f"{sys.executable} {sys.argv[0]} --setup {args.datadir}?"
193-
sys.exit(f"ERROR: Did you forget to run {cmd}?")
194-
default = False
195-
elif args.setup is not None:
196-
args.serve = False
197-
default = False
212+
if not os.path.exists(args.datadir):
213+
cmd = f"{sys.executable} {sys.argv[0]} --setup {args.datadir}?"
214+
sys.exit(f"ERROR: Did you forget to run {cmd}?")
198215
else:
216+
args.datadir = None
199217
args.setup = True
200218
args.serve = True
201-
default = True
202219

203220
# DjangoCMS looks for Python on $PATH?
204221
_ensure_python_on_PATH()
@@ -209,8 +226,9 @@ def add_worker_args(cmd, _):
209226
# First, set up the site.
210227
if args.setup:
211228
sitedir, elapsed = setup(datadir)
212-
print("%.2fs to initialize db" % (elapsed,))
213-
print(f"site created in {sitedir}")
229+
if args.legacy:
230+
print("%.2fs to initialize db" % (elapsed,))
231+
print(f"site created in {sitedir}")
214232
if not args.serve:
215233
print(f"now run {sys.executable} {sys.argv[0]} --serve {datadir}")
216234
else:
@@ -219,6 +237,11 @@ def add_worker_args(cmd, _):
219237

220238
# Then run the benchmark.
221239
if args.serve:
240+
if args.legacy:
241+
from legacyutils import maybe_handle_legacy
242+
maybe_handle_legacy(_bench_djangocms_requests, sitedir, legacyarg='legacy')
243+
sys.exit(0)
244+
222245
runner.datadir = datadir
223246

224247
def time_func(loops, *args):
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../.libs/legacyutils.py

benchmarks/bm_flaskblogging/run_benchmark.py

Lines changed: 27 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,11 @@
1717
# benchmarks
1818

1919
def bench_flask_requests(loops=1800):
20+
elapsed, _ = _bench_flask_requests(loops)
21+
return elapsed
22+
23+
24+
def _bench_flask_requests(loops=1800, legacy=False):
2025
"""Measure N HTTP requests to a local server.
2126
2227
Note that the server is freshly started here.
@@ -30,17 +35,36 @@ def bench_flask_requests(loops=1800):
3035
Hence this should be used with bench_time_func()
3136
instead of bench_func().
3237
"""
38+
start = pyperf.perf_counter()
3339
elapsed = 0
40+
times = []
3441
with netutils.serving(ARGV, DATADIR, "127.0.0.1:8000"):
3542
requests_get = requests.get
36-
for _ in range(loops):
43+
for i in range(loops):
44+
# This is a macro benchmark for a Python implementation
45+
# so "elapsed" covers more than just how long a request takes.
3746
t0 = pyperf.perf_counter()
3847
requests_get("http://localhost:8000/blog/").text
39-
elapsed += pyperf.perf_counter() - t0
40-
return elapsed
48+
t1 = pyperf.perf_counter()
4149

50+
elapsed += t1 - t0
51+
times.append(t0)
52+
if legacy and (i % 100 == 0):
53+
print(i, t0 - start)
54+
times.append(pyperf.perf_counter())
55+
if legacy:
56+
total = times[-1] - start
57+
print("%.2fs (%.3freq/s)" % (total, loops / total))
58+
return elapsed, times
59+
60+
61+
#############################
62+
# the script
4263

4364
if __name__ == "__main__":
65+
from legacyutils import maybe_handle_legacy
66+
maybe_handle_legacy(_bench_flask_requests, legacyarg='legacy')
67+
4468
runner = pyperf.Runner()
4569
runner.metadata['description'] = "Test the performance of flask"
4670
runner.bench_time_func("flaskblogging", bench_flask_requests)

benchmarks/bm_gevent_hub/run_benchmark.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,9 @@ def bench_switch(loops=1000):
8383
for _ in range(loops):
8484
t0 = pyperf.perf_counter()
8585
child_switch()
86-
elapsed += pyperf.perf_counter() - t0
86+
t1 = pyperf.perf_counter()
87+
88+
elapsed += t1 - t0
8789
return elapsed
8890

8991

@@ -97,7 +99,9 @@ def bench_wait_ready(loops=1000):
9799
for _ in range(loops):
98100
t0 = pyperf.perf_counter()
99101
hub_wait(watcher)
100-
elapsed += pyperf.perf_counter() - t0
102+
t1 = pyperf.perf_counter()
103+
104+
elapsed += t1 - t0
101105
return elapsed
102106

103107

@@ -145,9 +149,19 @@ def bench_wait_func_ready(loops=1000):
145149
}
146150

147151

152+
#############################
153+
# the script
154+
148155
if __name__ == "__main__":
156+
import sys
157+
if '--legacy' in sys.argv:
158+
for i in range(10000):
159+
bench_switch()
160+
sys.exit(0)
161+
149162
runner = pyperf.Runner()
150163
runner.metadata['description'] = "Test the performance of gevent"
164+
runner.argparser.add_argument("--legacy", action='store_true')
151165
runner.argparser.add_argument("benchmark", nargs="?",
152166
choices=sorted(BENCHMARKS),
153167
default="gevent_hub")

benchmarks/bm_gunicorn/legacyutils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../.libs/legacyutils.py

benchmarks/bm_gunicorn/run_benchmark.py

Lines changed: 30 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,15 @@
2323
]
2424

2525

26+
#############################
27+
# benchmarks
28+
2629
def bench_gunicorn(loops=3000):
30+
elapsed, _ = _bench_gunicorn(loops)
31+
return elapsed
32+
33+
34+
def _bench_gunicorn(loops=3000, legacy=False):
2735
"""Measure N HTTP requests to a local server.
2836
2937
Note that the server is freshly started here.
@@ -37,17 +45,36 @@ def bench_gunicorn(loops=3000):
3745
Hence this should be used with bench_time_func()
3846
instead of bench_func().
3947
"""
48+
start = pyperf.perf_counter()
4049
elapsed = 0
50+
times = []
4151
with netutils.serving(ARGV, DATADIR, ADDR):
4252
requests_get = requests.get
43-
for _ in range(loops):
53+
for i in range(loops):
54+
# This is a macro benchmark for a Python implementation
55+
# so "elapsed" covers more than just how long a request takes.
4456
t0 = pyperf.perf_counter()
4557
requests_get("http://localhost:8000/blog/").text
46-
elapsed += pyperf.perf_counter() - t0
47-
return elapsed
58+
t1 = pyperf.perf_counter()
59+
60+
elapsed += t1 - t0
61+
times.append(t0)
62+
if legacy and (i % 100 == 0):
63+
print(i, t0 - start)
64+
times.append(pyperf.perf_counter())
65+
if legacy:
66+
total = times[-1] - start
67+
print("%.2fs (%.3freq/s)" % (total, loops / total))
68+
return elapsed, times
4869

4970

71+
#############################
72+
# the script
73+
5074
if __name__ == "__main__":
75+
from legacyutils import maybe_handle_legacy
76+
maybe_handle_legacy(_bench_gunicorn, legacyarg='legacy')
77+
5178
runner = pyperf.Runner()
5279
runner.metadata['description'] = "Test the performance of gunicorn"
5380
runner.bench_time_func("gunicorn", bench_gunicorn)

benchmarks/bm_json/legacyutils.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
../.libs/legacyutils.py

0 commit comments

Comments
 (0)