diff --git a/README.md b/README.md index 18cc75f..7efbb02 100644 --- a/README.md +++ b/README.md @@ -151,10 +151,13 @@ This file will then be passed to the underlying Ruby interpreter with You can find several test harnesses in this repository: * harness - the normal default harness, with duration controlled by warmup iterations and time/count limits -* harness-perf - a simplified harness that runs for exactly the hinted number of iterations * harness-bips - a harness that measures iterations/second until stable * harness-continuous - a harness that adjusts the batch sizes of iterations to run in stable iteration size batches +* harness-once - a simplified harness that simply runs once +* harness-perf - a simplified harness that runs for exactly the hinted number of iterations +* harness-stackprof - a harness to profile the benchmark with stackprof * harness-stats - count method calls and loop iterations +* harness-vernier - a harness to profile the benchmark with vernier * harness-warmup - a harness which runs as long as needed to find warmed up (peak) performance To use it, run a benchmark script directly, specifying a harness directory with `-I`: diff --git a/harness-stackprof/harness.rb b/harness-stackprof/harness.rb new file mode 100644 index 0000000..ef01342 --- /dev/null +++ b/harness-stackprof/harness.rb @@ -0,0 +1,84 @@ +# frozen_string_literal: true + +# Profile the benchmark (ignoring initialization code) with stackprof. +# Customize stackprof options with an env var of STACKPROF_OPTS='key:value,...'. 
+# Usage: +# STACKPROF_OPTS='mode:object' MIN_BENCH_TIME=0 MIN_BENCH_ITRS=1 ruby -v -I harness-stackprof benchmarks/.../benchmark.rb +# STACKPROF_OPTS='mode:cpu,interval:10' MIN_BENCH_TIME=1 MIN_BENCH_ITRS=10 ruby -v -I harness-stackprof benchmarks/.../benchmark.rb + +require_relative "../harness/harness-common" +require_relative "../harness/harness-extra" + +ensure_global_gem("stackprof") + +# Default to collecting more information so that more post-processing options are +# available (like generating a flamegraph). +DEFAULTS = { + aggregate: true, + raw: true, +}.freeze + +# Convert strings of "true" or "false" to their actual boolean values (or raise). +BOOLS = {"true" => true, "false" => false} +def bool!(val) + case val + when TrueClass, FalseClass + # Respect values that are already booleans so that we can specify defaults intuitively. + val + else + BOOLS.fetch(val) { raise ArgumentError, "must be 'true' or 'false'" } + end +end + +# Parse the string of "key:value,..." into a hash that we can pass to stackprof. +def parse_opts_string(str) + return {} unless str + + str.split(/,/).map { |x| x.strip.split(/[=:]/, 2) }.to_h.transform_keys(&:to_sym) +end + +# Get options for stackprof from env var and convert strings to the types stackprof expects. +def stackprof_opts + opts = DEFAULTS.merge(parse_opts_string(ENV['STACKPROF_OPTS'])) + + bool = method(:bool!) + + # Use {key: conversion_proc_or_sym} to convert present options to their necessary types. 
+ { + aggregate: bool, + raw: bool, + mode: :to_sym, + interval: :to_i, + }.each do |key, method| + next unless opts.key?(key) + + method = proc(&method) if method.is_a?(Symbol) + opts[key] = method.call(opts[key]) + rescue => error + raise ArgumentError, "Option '#{key}' failed to convert: #{error}" + end + + opts +end + +def run_benchmark(n, &block) + require "stackprof" + + opts = stackprof_opts + prefix = "stackprof" + prefix = "#{prefix}-#{opts[:mode]}" if opts[:mode] + + out = output_file_path(prefix: prefix, ext: "dump") + StackProf.run(out: out, **opts) do + run_enough_to_profile(n, &block) + end + + # Show the basic textual report. + gem_exe("stackprof", "--text", out) + # Print the file path at the end to make it easy to copy the file name + # and use it for further analysis. + puts "Stackprof dump file:\n#{out}" + + # Dummy results to satisfy ./run_benchmarks.rb + return_results([0], [1.0]) if ENV['RESULT_JSON_PATH'] +end diff --git a/harness-vernier/harness.rb b/harness-vernier/harness.rb new file mode 100644 index 0000000..fb7fa24 --- /dev/null +++ b/harness-vernier/harness.rb @@ -0,0 +1,28 @@ +# frozen_string_literal: true + +# Profile the benchmark (ignoring initialization code) using vernier and display the profile. +# Set NO_VIEWER=1 to disable automatically opening the profile in a browser. +# Usage: +# MIN_BENCH_TIME=1 MIN_BENCH_ITRS=1 ruby -v -I harness-vernier benchmarks/... +# NO_VIEWER=1 MIN_BENCH_TIME=1 MIN_BENCH_ITRS=1 ruby -v -I harness-vernier benchmarks/... 
+ +require_relative "../harness/harness-common" +require_relative "../harness/harness-extra" + +ensure_global_gem("vernier") +ensure_global_gem_exe("profile-viewer") + +def run_benchmark(n, &block) + require "vernier" + + out = output_file_path(ext: "json") + Vernier.profile(out: out) do + run_enough_to_profile(n, &block) + end + + puts "Vernier profile:\n#{out}" + gem_exe("profile-viewer", out) unless ENV['NO_VIEWER'] == '1' + + # Dummy results to satisfy ./run_benchmarks.rb + return_results([0], [1.0]) +end diff --git a/harness/harness-extra.rb b/harness/harness-extra.rb new file mode 100644 index 0000000..ab2cebd --- /dev/null +++ b/harness/harness-extra.rb @@ -0,0 +1,76 @@ +# frozen_string_literal: true + +# Ensure a gem is installed globally (and add it to the load path) +# in a way that doesn't interfere with the benchmark's bundler setup. +def ensure_global_gem(name) + found = Gem.find_latest_files(name).first + unless found + Gem.install(name) + found = Gem.find_latest_files(name).first + end + warn "Adding to load path: #{File.dirname(found)}" + $LOAD_PATH << File.dirname(found) +end + +# Ensure an executable provided by the gem is present +# (useful for profile-viewer which has no lib, only the exe). +def ensure_global_gem_exe(name, exe = name) + Gem.bin_path(name, exe) +rescue Gem::GemNotFoundException + Gem.install(name) +end + +# Call a gem exe, removing any bundler env vars that might confuse it. +def gem_exe(*args) + system({'RUBYOPT' => '', 'BUNDLER_SETUP' => nil}, *args) +end + +# Get benchmark base name from the file path. +def benchmark_name + $0.match(%r{([^/]+?)(?:(?:/benchmark)?\.rb)?$})[1] +end + +# Get name of harness (stackprof, vernier, etc) from the file path of the loaded harness. +def harness_name + $LOADED_FEATURES.reverse_each do |feat| + if m = feat.match(%r{/harness-([^/]+)/harness\.rb$}) + return m[1] + end + end + raise "Unable to determine harness name" +end + +# Share a single timestamp for everything from this execution. 
+TIMESTAMP = Time.now.strftime('%F-%H%M%S') + +# Create a consistent file path in the data directory +# so that the data can be further analyzed. +def output_file_path(prefix: harness_name, suffix: benchmark_name, ruby_info: ruby_version_info, timestamp: TIMESTAMP, ext: "bin") + File.expand_path("../data/#{prefix}-#{timestamp}-#{ruby_info}-#{suffix}.#{ext}", __dir__) +end + +# Can we get the benchmark config name from somewhere? +def ruby_version_info + "#{RUBY_ENGINE}-#{RUBY_ENGINE_VERSION}" +end + +def get_time + Process.clock_gettime(Process::CLOCK_MONOTONIC) +end + +MIN_BENCH_TIME = Integer(ENV.fetch('MIN_BENCH_TIME', 10)) + +# Ensure the benchmark runs enough times for profilers to get sufficient data when sampling. +# Use the "n" hint (provided by the benchmarks themselves) as a starting point +# but allow that to be overridden by MIN_BENCH_ITRS env var. +# Also use MIN_BENCH_TIME to loop until the benchmark has run for a sufficient duration. +def run_enough_to_profile(n, &block) + start = get_time + loop do + # Allow MIN_BENCH_ITRS to override the argument. + n = ENV.fetch('MIN_BENCH_ITRS', n).to_i + n.times(&block) + + break if (get_time - start) >= MIN_BENCH_TIME + end +end