Skip to content

Commit

Permalink
Add benchmark analyzer (#259)
Browse files Browse the repository at this point in the history
* Review benchmark local script and add analyzer

* add analyzer env

* test analyzer
  • Loading branch information
tmigot authored Jul 2, 2024
1 parent cbeff59 commit 2f10876
Show file tree
Hide file tree
Showing 20 changed files with 125 additions and 30 deletions.
4 changes: 2 additions & 2 deletions benchmark/Manifest.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@

julia_version = "1.9.1"
manifest_format = "2.0"
project_hash = "14fe4b55e0aa680d5c90f646c1a87c8fc8737479"
project_hash = "0cb0756144aac73ae8e2d06d9a0f6567a7a2f964"

[[deps.ADNLPModels]]
deps = ["ADTypes", "ForwardDiff", "LinearAlgebra", "NLPModels", "Requires", "ReverseDiff", "SparseArrays", "SparseConnectivityTracer", "SparseMatrixColorings"]
git-tree-sha1 = "ad4682ad3f6da4246a5a5408593e5824d949e5a0"
git-tree-sha1 = "2b582670fb51216d8d000c6de72934d1f68c4e7c"
repo-rev = "main"
repo-url = "https://github.com/JuliaSmoothOptimizers/ADNLPModels.jl"
uuid = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
Expand Down
2 changes: 0 additions & 2 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
[deps]
ADNLPModels = "54578032-b7ea-4c30-94aa-7cbd1cce6c9a"
BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
Expand All @@ -13,7 +12,6 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
NLPModels = "a4795742-8479-5a88-8948-cc11e1c8c1a6"
NLPModelsJuMP = "792afdf1-32c1-5681-94e0-d7bf7a5df49e"
OptimizationProblems = "5049e819-d29b-5fba-b941-0eee7e64c1c6"
Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80"
Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
ReverseDiff = "37e2e3b7-166d-5795-8a7a-e32c996b4267"
SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
Expand Down
9 changes: 9 additions & 0 deletions benchmark/benchmark_analyzer/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
[deps]
BenchmarkProfiles = "ecbce9bc-3e5e-569d-9e29-55181f61f8d0"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
SolverBenchmark = "581a75fa-a23a-52d0-a590-d6201de2218a"
StatsPlots = "f3b207a7-027a-5e70-b257-86293d7955fd"
5 changes: 3 additions & 2 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
# Include useful packages
using ADNLPModels
using Dates, DelimitedFiles, JLD2, LinearAlgebra, Printf, SparseArrays
using BenchmarkTools, DataFrames, Plots
using BenchmarkTools, DataFrames
#JSO packages
using NLPModels, BenchmarkProfiles, OptimizationProblems, SolverBenchmark
using NLPModels, OptimizationProblems, SolverBenchmark
# Most likely benchmark with JuMP as well
using JuMP, NLPModelsJuMP

include("problems_sets.jl")
verbose_subbenchmark = false

# Run locally with `tune!(SUITE)` and then `run(SUITE)`
const SUITE = BenchmarkGroup()
Expand Down
2 changes: 1 addition & 1 deletion benchmark/gradient/benchmarks_gradient.jl
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
g = zeros(T, n)
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $g) setup =
(nlp = set_adnlp($pb, $(name_backend), $(backend), $nscal, $T))
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_coloring.jl
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
SUITE["$(fun)"][f][T][s][b][pb] =
@benchmarkable set_adnlp($pb, $(name_backend), $backend, $nscal, $T)
end
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_hessian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars"
verbose_subbenchmark && @info " $(pb): $T with $n vars"
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp)) setup =
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
end
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_hessian_lagrangian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
y = 10 * T[-(-1.0)^i for i = 1:m]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $y) setup =
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_hessian_residual.jl
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ for f in benchmark_list
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
v = 10 * T[-(-1.0)^i for i = 1:nequ]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls), $v) setup =
(nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_hprod.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars"
verbose_subbenchmark && @info " $(pb): $T with $n vars"
v = [sin(T(i) / 10) for i = 1:n]
Hv = Vector{T}(undef, n)
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Hv) setup =
Expand Down
2 changes: 1 addition & 1 deletion benchmark/hessian/benchmarks_hprod_lagrangian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars"
verbose_subbenchmark && @info " $(pb): $T with $n vars"
y = 10 * T[-(-1.0)^i for i = 1:m]
v = [sin(T(i) / 10) for i = 1:n]
Hv = Vector{T}(undef, n)
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_coloring.jl
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
SUITE["$(fun)"][f][T][s][b][pb] =
@benchmarkable set_adnlp($pb, $(name_backend), $backend, $nscal, $T)
end
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jacobian.jl
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp)) setup =
(nlp = set_adnlp($pb, $(name_backend), $backend, $nscal, $T))
end
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jacobian_residual.jl
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ for f in benchmark_list
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nls, get_x0(nls)) setup =
(nls = set_adnls($pb, $(name_backend), $backend, $nscal, $T))
end
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jprod.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
Jv = Vector{T}(undef, m)
v = 10 * T[-(-1.0)^i for i = 1:n]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jprod_residual.jl
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ for f in benchmark_list
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
Jv = Vector{T}(undef, nequ)
v = 10 * T[-(-1.0)^i for i = 1:n]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jv) setup =
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jtprod.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ for f in benchmark_list
for pb in problem_sets[s]
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
@info " $(pb): $T with $n vars and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars and $m cons"
Jtv = Vector{T}(undef, n)
v = 10 * T[-(-1.0)^i for i = 1:m]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jtv) setup =
Expand Down
2 changes: 1 addition & 1 deletion benchmark/jacobian/benchmarks_jtprod_residual.jl
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ for f in benchmark_list
n = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nvar(n = $(nscal))"))
m = eval(Meta.parse("OptimizationProblems.get_" * pb * "_ncon(n = $(nscal))"))
nequ = eval(Meta.parse("OptimizationProblems.get_" * pb * "_nls_nequ(n = $(nscal))"))
@info " $(pb): $T with $n vars, $nequ residuals and $m cons"
verbose_subbenchmark && @info " $(pb): $T with $n vars, $nequ residuals and $m cons"
Jtv = Vector{T}(undef, n)
v = 10 * T[-(-1.0)^i for i = 1:nequ]
SUITE["$(fun)"][f][T][s][b][pb] = @benchmarkable $fun(nlp, get_x0(nlp), $v, $Jtv) setup =
Expand Down
65 changes: 65 additions & 0 deletions benchmark/run_analyzer.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
# Analyze a saved benchmark result: load `<name>.jld2` / `<name>.json` from the
# `results` folder next to this script, plot the raw trials, convert each
# benchmark group to DataFrames, and save solver performance profiles as PNGs.
using Pkg
Pkg.activate("benchmark/benchmark_analyzer")
Pkg.instantiate()
# NOTE(review): `Plots` is not declared in benchmark_analyzer/Project.toml, so
# `using Plots` would fail in this environment; `StatsPlots` (declared) provides
# the plotting API used below (`plot`, `savefig`). `DataFrames` is required for
# the `DataFrame` conversions further down and is a declared dependency.
using BenchmarkTools, DataFrames, Dates, JLD2, JSON, StatsPlots
using SolverBenchmark, BenchmarkProfiles

# Name of the result file (without extension). Leave empty to pick the most
# recent entry of the `results` folder.
name = ""
resultpath = joinpath(dirname(@__FILE__), "results")
if name == ""
  name = replace(readdir(resultpath)[end], ".jld2" => "", ".json" => "")
end

@load joinpath(dirname(@__FILE__), "results", "$name.jld2") result
t = BenchmarkTools.load(joinpath(dirname(@__FILE__), "results", "$name.json"))

# plots
plot(t) # you can use all the keyword arguments from Plots.jl, for instance st=:box or yaxis=:log10.

"""
    bg_to_df(bench::BenchmarkGroup)

Return a `Dict{Symbol, DataFrame}` mapping each solver name appearing in
`bench` (e.g. `"jump"`) to a `DataFrame` with columns `:median_time` and
`:median_memory` holding, for every problem in that solver's group, the median
time and memory of the recorded trial.
"""
function bg_to_df(bench::BenchmarkGroup)
  solvers = collect(keys(bench)) # "jump", ...
  # All solvers are assumed to share the same problem list; take it from the first.
  problems = collect(keys(bench[solvers[1]]))
  dfT = Dict{Symbol, DataFrame}()
  for solver in solvers
    dfT[Symbol(solver)] = DataFrame(
      [
        [median(bench[solver][pb]).time for pb in problems],
        [median(bench[solver][pb]).memory for pb in problems],
      ],
      [:median_time, :median_memory],
    )
  end
  return dfT
end

@info "Available benchmarks"
# One entry per (API function, backend list, element type, problem set) combination.
df_results = Dict{String, Dict{Symbol, DataFrame}}()
for benchmark in keys(result)
  result_bench = result[benchmark] # one NLPModel API function
  for benchmark_list in keys(result_bench)
    for type_bench in keys(result_bench[benchmark_list])
      for set_bench in keys(result_bench[benchmark_list][type_bench])
        @info "$benchmark/$benchmark_list for type $type_bench on problem set $(set_bench)"
        bench = result_bench[benchmark_list][type_bench][set_bench]
        df_results["$(benchmark)_$(benchmark_list)_$(type_bench)_$(set_bench)"] = bg_to_df(bench)
      end
    end
  end
end

# Performance profiles comparing solvers on median time and median memory.
# b::BenchmarkProfiles.AbstractBackend = PlotsBackend()
costs = [
  df -> df.median_time,
  df -> df.median_memory,
]
costnames = ["median time", "median memory"]
for key_benchmark in keys(df_results)
  stats = df_results[key_benchmark]
  p = profile_solvers(stats, costs, costnames)
  savefig(p, "$(name)_$(key_benchmark).png")
end
42 changes: 32 additions & 10 deletions benchmark/run_local.jl
Original file line number Diff line number Diff line change
@@ -1,24 +1,46 @@
using Pkg, Logging, JLD2, Dates
using Pkg
Pkg.activate("benchmark")
# instantiate
# up ADNLPModels
Pkg.instantiate()
Pkg.update("ADNLPModels")
using Logging, JLD2, Dates

path = dirname(@__FILE__)
skip_tune = true

@info "INITIALIZE"
include("benchmarks.jl")

list_of_benchmark = keys(SUITE)
# gradient: SUITE[@tagged "grad!"]
# Coloring benchmark: SUITE[@tagged "hessian_backend" || "hessian_residual_backend" || "jacobian_backend" || "jacobian_residual_backend"]
# Matrix benchmark: SUITE[@tagged "hessian_backend" || "hessian_residual_backend" || "jacobian_backend" || "jacobian_residual_backend" || "hess_coord!" || "hess_coord_residual!" || "jac_coord!" || "jac_coord_residual!"]
# Matrix-vector products: SUITE[@tagged "hprod!" || "hprod_residual!" || "jprod!" || "jprod_residual!" || "jtprod!" || "jtprod_residual!"]

for benchmark_in_suite in list_of_benchmark
@info "$(benchmark_in_suite)"
end

@info "TUNE"
@time with_logger(ConsoleLogger(Error)) do # remove warnings
tune!(SUITE)
if !skip_tune
@time with_logger(ConsoleLogger(Error)) do
tune!(SUITE)
BenchmarkTools.save("params.json", params(suite));
end
else
@info "Skip tuning"
# https://juliaci.github.io/BenchmarkTools.jl/dev/manual/
BenchmarkTools.DEFAULT_PARAMETERS.evals = 1
end

@info "RUN"
@time result = with_logger(ConsoleLogger(Error)) do # remove warnings
run(SUITE)
if "params.json" in (path == "" ? readdir() : readdir(path))
loadparams!(suite, BenchmarkTools.load("params.json")[1], :evals, :samples);
end
run(SUITE, verbose = true)
end

@info "SAVE BENCHMARK RESULT"
name = "$(today())_adnlpmodels_benchmark"
@save "$name.jld2" result

@info "ANALYZE"
# save the result in a jld2 file?
# plots
BenchmarkTools.save("$name.json", result)

0 comments on commit 2f10876

Please sign in to comment.