diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8ed89839..b57a90f6 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -5,11 +5,13 @@ on:
       - master
     tags: '*'
   pull_request:
+
 concurrency:
   # Skip intermediate builds: always.
   # Cancel intermediate builds: only if it is a pull request build.
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
 jobs:
   test:
     name: PartitionedArrays / Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
@@ -110,3 +112,85 @@ jobs:
         using PartitionedArrays
         DocMeta.setdocmeta!(PartitionedArrays, :DocTestSetup, :(using PartitionedArrays); recursive=true)
         doctest(PartitionedArrays)'
+
+
+  benchmark:
+    name: Run performance regression tests
+    # Only run on pushes to the default branch ('master' per the 'on:' trigger above).
+    if: github.ref == 'refs/heads/master'
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        version:
+          - '1.10'
+        os:
+          - ubuntu-latest
+        arch:
+          - x64
+    steps:
+      - uses: actions/checkout@v4
+      - uses: julia-actions/setup-julia@v1
+        with:
+          version: ${{ matrix.version }}
+          arch: ${{ matrix.arch }}
+      - uses: actions/cache@v4
+        env:
+          cache-name: cache-artifacts
+        with:
+          path: ~/.julia/artifacts
+          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
+          restore-keys: |
+            ${{ runner.os }}-test-${{ env.cache-name }}-
+            ${{ env.cache-name }}-
+            ${{ runner.os }}-test-
+            ${{ runner.os }}-
+      - name: Run benchmark
+        run: |
+          julia --project=PartitionedArraysBenchmarks -e '
+            using Pkg
+            Pkg.develop(path=".")
+            include("PartitionedArraysBenchmarks/run_benchmarks.jl")'
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Julia benchmark result
+          tool: 'julia'
+          output-file-path: output.json
+          gh-pages-branch: benchmark_results
+          benchmark-data-dir-path: bench
+          # GITHUB_TOKEN is sufficient here: the results branch lives in this same repository.
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          # Show alert with commit comment on detecting possible performance regression
+          alert-threshold: '200%'
+          comment-on-alert: true
+          fail-on-alert: true
+
+  copybenchmarks:
+    name: Copy benchmark results to gh-pages
+    if: github.ref == 'refs/heads/master'
+    needs: [benchmark, docs]
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: Checkout gh-pages branch
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages
+          path: gh-pages
+      - name: Checkout benchmark_results branch
+        uses: actions/checkout@v4
+        with:
+          ref: benchmark_results
+          path: benchmark_results
+      - name: Copy benchmark results to gh-pages
+        run: |
+          cp -r benchmark_results/bench gh-pages/dev/benchmarks
+          cd gh-pages
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+          git add -A
+          git commit -m 'Copy benchmark results to documentation on gh-pages'
+          git push
diff --git a/PartitionedArraysBenchmarks/Project.toml b/PartitionedArraysBenchmarks/Project.toml
new file mode 100644
index 00000000..7d2e4b45
--- /dev/null
+++ b/PartitionedArraysBenchmarks/Project.toml
@@ -0,0 +1,5 @@
+[deps]
+BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9"
+SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
diff --git a/PartitionedArraysBenchmarks/run_benchmarks.jl b/PartitionedArraysBenchmarks/run_benchmarks.jl
new file mode 100644
index 00000000..c467fbdb
--- /dev/null
+++ b/PartitionedArraysBenchmarks/run_benchmarks.jl
@@ -0,0 +1,46 @@
+module PartitionedArraysBenchmarkTests
+
+using BenchmarkTools
+
+using SparseArrays
+using PartitionedArrays
+using LinearAlgebra
+
+"""
+    test_benchmark(n)
+
+Test benchmark with dummy code.
+"""
+function test_benchmark(n)
+    T = SparseMatrixCSC{Float64,Int}
+    Ti = indextype(T)
+    Tv = eltype(T)
+    I = Ti[1,2,5,4,1]
+    J = Ti[3,6,1,1,3]
+    V = Tv[4,5,3,2,5]
+    m = 7
+    ncols = 6  # local name: must not shadow the benchmark-size argument n
+
+    B = compresscoo(T,I,J,V,m,ncols)
+
+    b1 = ones(Tv,size(B,1))
+    b2 = ones(Tv,size(B,1))
+    x = collect(Tv,1:size(B,2))
+
+    mul!(b1,B,x)
+    spmv!(b2,B,x)
+end
+
+# Build a benchmark suite for PartitionedArrays
+suite = BenchmarkGroup()
+suite["test-suite"] = BenchmarkGroup(["test", "test_tag"])
+suite["test-suite"]["n=10"] = @benchmarkable test_benchmark(10)
+
+# Run all benchmarks
+tune!(suite)
+results = run(suite, verbose = true)
+
+# Save benchmark results for tracking and visualization
+BenchmarkTools.save("output.json", median(results))
+
+end # module