Add --no_yjit mode to burn-in test script #348

Merged 1 commit on Dec 2, 2024

burn_in.rb: 41 changes (25 additions, 16 deletions)
@@ -20,6 +20,7 @@
   num_procs: Etc.nprocessors,
   num_long_runs: 4,
   categories: ['headline', 'other'],
+  no_yjit: false,
 })

 # Parse the command-line options
@@ -43,6 +44,10 @@
   opts.on("--category=headline,other,micro", "when given, only benchmarks with specified categories will run") do |v|
     args.categories = v.split(",")
   end
+
+  opts.on("--no_yjit", "when given, test with the CRuby interpreter, without enabling YJIT") do
+    args.no_yjit = true
+  end
 end.parse!

 def free_file_path(parent_dir, name_prefix)
@@ -54,7 +59,7 @@ def free_file_path(parent_dir, name_prefix)
   end
 end

-def run_benchmark(bench_name, logs_path, run_time, ruby_version)
+def run_benchmark(bench_name, no_yjit, logs_path, run_time, ruby_version)
   # Determine the path to the benchmark script
   script_path = File.join('benchmarks', bench_name, 'benchmark.rb')
   if not File.exist?(script_path)
@@ -75,18 +80,22 @@ def run_benchmark(bench_name, logs_path, run_time, ruby_version)
   env.merge!(test_env_vars)

   # Assemble random command-line options to test
-  test_options = [
-    "--yjit-call-threshold=#{[1, 2, 10, 30].sample()}",
-    "--yjit-cold-threshold=#{[1, 2, 5, 10, 500, 50_000].sample()}",
-    [
-      "--yjit-mem-size=#{[1, 2, 3, 4, 5, 10, 64, 128].sample()}",
-      "--yjit-exec-mem-size=#{[1, 2, 3, 4, 5, 10, 64, 128].sample()}",
-    ].sample(),
-    ['--yjit-code-gc', nil].sample(),
-    ['--yjit-perf', nil].sample(),
-    ['--yjit-stats', nil].sample(),
-    ['--yjit-log=/dev/null', nil].sample(),
-  ].compact
+  if no_yjit
+    test_options = []
+  else
+    test_options = [
+      "--yjit-call-threshold=#{[1, 2, 10, 30].sample()}",
+      "--yjit-cold-threshold=#{[1, 2, 5, 10, 500, 50_000].sample()}",
+      [
+        "--yjit-mem-size=#{[1, 2, 3, 4, 5, 10, 64, 128].sample()}",
+        "--yjit-exec-mem-size=#{[1, 2, 3, 4, 5, 10, 64, 128].sample()}",
+      ].sample(),
+      ['--yjit-code-gc', nil].sample(),
+      ['--yjit-perf', nil].sample(),
+      ['--yjit-stats', nil].sample(),
+      ['--yjit-log=/dev/null', nil].sample(),
+    ].compact
+  end

   # Assemble the command string
   cmd = [
@@ -137,12 +146,12 @@ def run_benchmark(bench_name, logs_path, run_time, ruby_version)
   return false
 end

-def test_loop(bench_names, logs_path, run_time, ruby_version)
+def test_loop(bench_names, no_yjit, logs_path, run_time, ruby_version)
   error_found = false

   while true
     bench_name = bench_names.sample()
-    error = run_benchmark(bench_name, logs_path, run_time, ruby_version)
+    error = run_benchmark(bench_name, no_yjit, logs_path, run_time, ruby_version)
     error_found ||= error

     if error_found
@@ -194,7 +203,7 @@ def test_loop(bench_names, logs_path, run_time, ruby_version)
 args.num_procs.times do |i|
   pid = Process.fork do
     run_time = (i < args.num_long_runs)? (3600 * 2):10
-    test_loop(bench_names, args.logs_path, run_time, ruby_version)
+    test_loop(bench_names, args.no_yjit, args.logs_path, run_time, ruby_version)
   end
 end
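For reference, a minimal usage sketch of the new flag (not part of the diff above; the --no_yjit and --category options come from the option parser shown in this PR, while invoking the script as a plain ruby process from the repository root is an assumption):

    ruby burn_in.rb --no_yjit
    ruby burn_in.rb --no_yjit --category=headline,other

When --no_yjit is given, no random YJIT options are sampled, so each forked benchmark process runs under the plain CRuby interpreter.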