diff --git a/doc/usage.rst b/doc/usage.rst
index ae753af7..95e9c25d 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -104,6 +104,7 @@ Usage::
                            [--append FILENAME] [--manifest MANIFEST]
                            [--timeout TIMEOUT]
                            [-b BM_LIST] [--inherit-environ VAR_LIST]
                            [-p PYTHON]
+                           [--hook HOOK]
 
 options::
 
@@ -146,6 +147,9 @@ options::
                         Use the same number of loops as a previous run
                         (i.e., don't recalibrate). Should be a path to a
                         .json file from a previous run.
+  --hook HOOK
+                        Apply the given pyperf hook when running the
+                        benchmarks.
 
 show
 ----
diff --git a/pyperformance/cli.py b/pyperformance/cli.py
index 3348f62e..df68dc3d 100644
--- a/pyperformance/cli.py
+++ b/pyperformance/cli.py
@@ -19,6 +19,8 @@
     cmd_compare,
 )
 
+from pyperf import _hooks
+
 
 def comma_separated(values):
     values = [value.strip() for value in values.split(',')]
@@ -93,6 +95,12 @@ def parse_args():
                      help="Specify a timeout in seconds for a single "
                           "benchmark run (default: disabled)",
                      type=check_positive)
+    hook_names = list(_hooks.get_hook_names())
+    cmd.add_argument("--hook",
+                     action="append",
+                     choices=hook_names,
+                     metavar=f"{', '.join(x for x in hook_names if not x.startswith('_'))}",
+                     help="Apply the given pyperf hook(s) when running each benchmark")
     filter_opts(cmd)
 
     # show
diff --git a/pyperformance/run.py b/pyperformance/run.py
index 67ab5d89..e0df48d7 100644
--- a/pyperformance/run.py
+++ b/pyperformance/run.py
@@ -242,5 +242,8 @@ def get_pyperf_opts(options):
         opts.append('--min-time=%s' % options.min_time)
     if options.timeout:
         opts.append('--timeout=%s' % options.timeout)
+    if options.hook:
+        for hook in options.hook:
+            opts.append('--hook=%s' % hook)
 
     return opts
diff --git a/pyperformance/tests/test_commands.py b/pyperformance/tests/test_commands.py
index 870a58bc..42574f26 100644
--- a/pyperformance/tests/test_commands.py
+++ b/pyperformance/tests/test_commands.py
@@ -173,6 +173,25 @@ def test_run_test_benchmarks(self):
             capture=None,
         )
 
+    def test_run_with_hook(self):
+        # We expect this to fail, since pystats requires a special build of Python
+        filename = self.resolve_tmp('bench-test-hook.json')
+
+        stdout = self.run_pyperformance(
+            'run',
+            '--manifest', os.path.join(tests.DATA_DIR, 'MANIFEST'),
+            '-b', 'all',
+            '-o', filename,
+            '--hook', 'pystats',
+            exitcode=1,
+            capture='combined'
+        )
+
+        self.assertIn(
+            "Can not collect pystats because python was not built with --enable-pystats",
+            stdout
+        )
+
 ###################################
 # compile
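
For reference, a hypothetical invocation of the new option might look like the following (the output filename is illustrative; the pystats hook only produces data on a CPython build configured with --enable-pystats):

    pyperformance run -b all -o results.json --hook pystats

Because --hook is declared with action="append", it may be repeated on the command line, and get_pyperf_opts() forwards each selected hook to pyperf as its own --hook=<name> option.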