diff --git a/.github/actions/run-benchmarks-all-runtime-versions/action.yml b/.github/actions/run-benchmarks-all-runtime-versions/action.yml
new file mode 100644
index 000000000..d3b9cded7
--- /dev/null
+++ b/.github/actions/run-benchmarks-all-runtime-versions/action.yml
@@ -0,0 +1,59 @@
+name: Run benchmarks for all runtime versions
+
+inputs:
+  scheduler:
+    type: string # A comma-separated list (no spaces!) is allowed.
+    required: true
+  feature-branch:
+    type: string
+    required: true
+  num-workers:
+    type: string # Provide a comma-separated list without spaces.
+    description: The number of workers to be used by the scheduler.
+    required: true
+
+
+outputs:
+  output-files:
+    description: "The CSV files summarizing all benchmark results, separated by spaces"
+    value: ${{ steps.list-output-files.outputs.output-files }}
+
+
+runs:
+  using: "composite"
+  steps:
+    - name: Run C Benchmarks (feature branch)
+      id: benchmarks-feature
+      uses: lf-lang/reactor-c/.github/actions/run-benchmarks@automated-full-benchmark
+      with:
+        scheduler: ${{ inputs.scheduler }}
+        runtime-ref: ${{ inputs.feature-branch }}
+        target: c
+        num-workers: ${{ inputs.num-workers }}
+
+    - name: Run C Benchmarks (main branch)
+      id: benchmarks-main
+      uses: lf-lang/reactor-c/.github/actions/run-benchmarks@automated-full-benchmark
+      with:
+        scheduler: GEDF_NP,NP,adaptive
+        runtime-ref: main
+        target: c
+        num-workers: ${{ inputs.num-workers }}
+
+    - name: Run C++ Benchmarks (master branch)
+      id: benchmarks-main-cpp
+      uses: lf-lang/reactor-c/.github/actions/run-benchmarks@automated-full-benchmark
+      with:
+        scheduler: default
+        runtime-ref: master
+        target: cpp
+        num-workers: ${{ inputs.num-workers }}
+
+    - name: List Output Files
+      id: list-output-files
+      run: |
+        echo "::set-output name=output-files::\
+        ${{ steps.benchmarks-feature.outputs.output-file }} \
+        ${{ steps.benchmarks-main.outputs.output-file }} \
+        ${{ steps.benchmarks-main-cpp.outputs.output-file }}"
+      shell: bash
diff --git a/.github/actions/run-benchmarks/action.yml b/.github/actions/run-benchmarks/action.yml
new file mode 100644
index 000000000..7904a1fca
--- /dev/null
+++ b/.github/actions/run-benchmarks/action.yml
@@ -0,0 +1,112 @@
+name: Run benchmarks
+description: Run benchmarks and collect results
+
+inputs:
+  scheduler:
+    type: string # A comma-separated list (no spaces!) is allowed.
+    required: true
+  runtime-ref:
+    type: string
+    required: true
+  target:
+    type: string
+    description: The target language (c or cpp).
+  num-workers:
+    type: string # Provide a comma-separated list without spaces.
+    description: The number of workers to be used by the scheduler.
+    required: true
+  iterations:
+    type: number
+    description: The number of iterations with which to run each benchmark.
+    required: false
+    default: 12
+  repo-for-saving-data:
+    description: |
+      The directory name of the repository where graphs and CSVs from benchmarks should be saved.
+    type: string
+    default: benchmarks-lingua-franca
+  errors-file:
+    description: |
+      The file in which to write any error messages.
+    type: string
+    default: errors.txt
+
+
+outputs:
+  output-file:
+    description: "The CSV file summarizing all benchmark results"
+    value: ${{ steps.compute-output-file.outputs.output-file }}
+
+
+runs:
+  using: "composite"
+  steps:
+    - name: Check out requested branches
+      id: check-out
+      run: |
+        ls
+        echo "Checking out ${{ inputs.runtime-ref }}"
+        SUBMODULE_DIR=org.lflang/src/lib/${{ inputs.target }}/reactor-${{ inputs.target }}
+        cd lf/$SUBMODULE_DIR && \
+        git checkout ${{ inputs.runtime-ref }} && cd ../../../../..
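+        # lingua-franca-ref.txt in the runtime submodule names the lingua-franca
+        # commit associated with this runtime version; check out that commit in lf.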
+        git checkout $(cat $SUBMODULE_DIR/lingua-franca-ref.txt)
+        echo "::set-output name=submodule-dir::$SUBMODULE_DIR"
+        cd ..
+      shell: bash
+
+    - name: Compute output file
+      id: compute-output-file
+      run: |
+        SHA=$(cd lf/${{ steps.check-out.outputs.submodule-dir }} && \
+        git rev-parse --short=7 HEAD)
+        OUTPUT_DIR=csvs/$(date -I)
+        mkdir -p ${{ inputs.repo-for-saving-data }}/csvs
+        OUTPUT_FILENAME=${{ inputs.scheduler }}-${{ inputs.num-workers }}-${{ inputs.runtime-ref }}-${SHA:0:7}.csv
+        cd ${{ inputs.repo-for-saving-data }}
+        EXISTING_FILE=$(find ./csvs -name $OUTPUT_FILENAME | head -n 1)
+        cd ..
+        if [[ -z $EXISTING_FILE ]]; then
+          mkdir -p ${{ inputs.repo-for-saving-data }}/$OUTPUT_DIR
+          echo "The collected benchmark results will be saved to $OUTPUT_DIR/$OUTPUT_FILENAME."
+          echo "::set-output name=output-file::$OUTPUT_DIR/$OUTPUT_FILENAME"
+        else
+          echo "The benchmark will not be run because results for that commit are already saved at $EXISTING_FILE."
+          echo "::set-output name=file-already-exists::true"
+          echo "::set-output name=output-file::$EXISTING_FILE"
+        fi
+      shell: bash
+
+    - name: Update LFC
+      run: |
+        cd lf
+        ./gradlew buildLfc
+        cd ..
+      shell: bash
+      if: ${{ ! steps.compute-output-file.outputs.file-already-exists }}
+
+    - name: Run benchmarks
+      run: |
+        # Allow at most 1 second per iteration. This will result in timeouts on runtime versions
+        # that are an order of magnitude slower than the unthreaded C runtime.
+        ./runner/run_benchmark.py -m \
+        continue_on_error=True \
+        timeout=${{ inputs.iterations }} \
+        iterations=${{ inputs.iterations }} \
+        benchmark="glob(*)" \
+        target=lf-${{ inputs.target }} \
+        size=fast \
+        ++target.params.scheduler=${{ inputs.scheduler }} \
+        ++stacktrace=True \
+        threads=${{ inputs.num-workers }} \
+        | tee >(grep "\[ERROR\]" >> ${{ inputs.errors-file }})
+      shell: bash
+      if: ${{ ! steps.compute-output-file.outputs.file-already-exists }}
+
+    - name: Collect benchmark results
+      id: collect-results
+      run: |
+        ./runner/collect_results.py latest ${{ inputs.repo-for-saving-data }}/${{ steps.compute-output-file.outputs.output-file }}
+      shell: bash
+      if: ${{ ! steps.compute-output-file.outputs.file-already-exists }}
diff --git a/.github/actions/set-up-workspace/action.yml b/.github/actions/set-up-workspace/action.yml
new file mode 100644
index 000000000..69b2d4036
--- /dev/null
+++ b/.github/actions/set-up-workspace/action.yml
@@ -0,0 +1,37 @@
+name: Set up workspace
+description: Check out repositories, download software, install LFC, etc.
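+# This action assumes the benchmarks-lingua-franca repository is checked out at the workspace root.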
+runs:
+  using: "composite"
+  steps:
+    - name: Setup Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: 3.8
+
+    - name: Checkout Lingua Franca repository
+      uses: actions/checkout@v2
+      with:
+        repository: lf-lang/lingua-franca
+        path: lf
+        fetch-depth: 0 # We may need to check out older refs
+        submodules: recursive
+
+    - name: Prepare LF build environment
+      uses: ./lf/.github/actions/prepare-build-env
+
+    - name: Install Python dependencies
+      run: pip3 install -r runner/requirements.txt
+      shell: bash
+
+    - name: Set LF_PATH and LF_BENCHMARKS_PATH environment variables
+      run: |
+        echo "LF_PATH=$GITHUB_WORKSPACE/lf" >> $GITHUB_ENV
+        echo "LF_BENCHMARKS_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+      shell: bash
+
+    - name: Install eu-stack
+      run: |
+        sudo apt-get update
+        sudo apt-get install -y elfutils
+      shell: bash
diff --git a/.github/actions/visualize-save-upload/action.yml b/.github/actions/visualize-save-upload/action.yml
new file mode 100644
index 000000000..51a6a5670
--- /dev/null
+++ b/.github/actions/visualize-save-upload/action.yml
@@ -0,0 +1,97 @@
+name: Visualize, Save, and Upload Benchmark Results
+
+inputs:
+  csv-files:
+    type: string
+    required: true
+  token:
+    type: string
+    description: Personal access token used to push results and comment on the PR.
+    required: true
+  id:
+    type: string
+    description: String that can be used in a file name to distinguish this from other similar runs.
+    required: true
+  finalize:
+    description: Whether this is the last run of this workflow to edit the same comment.
+    type: boolean
+    default: false
+  repo-for-saving-data:
+    description: |
+      The directory name of the repository where graphs and CSVs from benchmarks should be saved.
+    type: string
+    default: benchmarks-lingua-franca
+
+runs:
+  using: "composite"
+  steps:
+    - name: Make figure
+      id: figure
+      run: |
+        # SCHEDULER and FEATURE_BRANCH are expected to be set by the calling workflow.
+        OUTPUT_DIR=images/$(date -I)
+        echo "Figure will be created in $OUTPUT_DIR"
+        SHA=${{ github.sha }}
+        OUTPUT_FILE=$OUTPUT_DIR/$SCHEDULER-$FEATURE_BRANCH-${{ inputs.id }}-${SHA:0:7}.png
+        echo "Figure will be named $OUTPUT_FILE"
+        mkdir -p ${{ inputs.repo-for-saving-data }}/$OUTPUT_DIR
+        cd ${{ inputs.repo-for-saving-data }}
+        ../runner/make-graphics.py ${{ inputs.csv-files }} $OUTPUT_FILE
+        cd ..
+        echo "::set-output name=output-file::$OUTPUT_FILE"
+      shell: bash
+
+    - name: Commit figure and output files
+      run: |
+        cd ${{ inputs.repo-for-saving-data }}
+        git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
+        git config --global user.name "github-actions[bot]"
+        git fetch
+        git add -f ${{ inputs.csv-files }} ${{ steps.figure.outputs.output-file }}
+        if [[ $(git status) != *"nothing to commit"* ]]; then
+          git commit -m "benchmark ${{ github.sha }}"
+          git push https://token:${{ inputs.token }}@github.com/lf-lang/${{ inputs.repo-for-saving-data }}.git
+        fi
+        cd ..
+      shell: bash
+
+    - name: Compute error messages
+      id: compute-error-messages
+      run: |
+        ls
+        if [ -f errors.txt ]; then
+          ERROR_MESSAGES="The benchmark runner reported the following errors:"$'\n'"$(cat errors.txt)"
+          echo "$ERROR_MESSAGES"
+          echo 'ERROR_MESSAGES<<EOF' >> $GITHUB_ENV
+          echo "$ERROR_MESSAGES" >> $GITHUB_ENV
+          echo 'EOF' >> $GITHUB_ENV
+        fi
+      shell: bash
+
+    - name: Comment PR
+      uses: thollander/actions-comment-pull-request@v1
+      with:
+        message: |
+          Running benchmarks from commit ${{ github.sha }}...
+          ${{ env.ERROR_MESSAGES }}
+
+          Results so far:
+
+          ![Visualization of the benchmark results.](https://raw.githubusercontent.com/lf-lang/${{ inputs.repo-for-saving-data }}/saved-benchmark-results/${{ steps.figure.outputs.output-file }})
+        comment_includes: ${{ github.sha }}
+        pr_number: ${{ env.PR_NUMBER }}
+        GITHUB_TOKEN: ${{ inputs.token }}
+      if: ${{ inputs.finalize == 'false' }}
+
+    - name: Finalize Comment
+      uses: thollander/actions-comment-pull-request@v1
+      with:
+        message: |
+          :heavy_check_mark: Finished running benchmarks from ${{ github.sha }}.
+          ${{ env.ERROR_MESSAGES }}
+
+          ![Visualization of the benchmark results.](https://raw.githubusercontent.com/lf-lang/${{ inputs.repo-for-saving-data }}/saved-benchmark-results/${{ steps.figure.outputs.output-file }})
+        comment_includes: ${{ github.sha }}
+        pr_number: ${{ env.PR_NUMBER }}
+        GITHUB_TOKEN: ${{ inputs.token }}
+      if: ${{ inputs.finalize == 'true' }}
diff --git a/.github/workflows/continuous-benchmark.yml b/.github/workflows/continuous-benchmark.yml
new file mode 100644
index 000000000..997d8e595
--- /dev/null
+++ b/.github/workflows/continuous-benchmark.yml
@@ -0,0 +1,78 @@
+name: Continuous Benchmarking
+
+
+on:
+  push:
+    branches: [continuous-benchmarking] # FIXME: delete this line before merge
+  pull_request:
+  workflow_dispatch:
+
+
+permissions:
+  contents: write
+  deployments: write
+
+
+jobs:
+  benchmark:
+    name: Run C Benchmarks
+    runs-on: ubuntu-latest # FIXME: change to self-hosted after russel is set up.
+
+    steps:
+      - name: Checkout benchmark repository
+        uses: actions/checkout@v2
+        with:
+          repository: lf-lang/benchmarks-lingua-franca
+          ref: automated-full-benchmark # FIXME: delete this line before merge
+
+      - name: Set up workspace
+        uses: lf-lang/reactor-c/.github/actions/set-up-workspace@automated-full-benchmark
+
+      - name: Update LFC
+        run: |
+          cd lf
+          ./gradlew buildLfc
+          cd ..
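+          # Rebuilding lfc ensures the benchmarks below are compiled with the current lingua-franca checkout.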
+        shell: bash
+
+      - name: Run C Benchmarks (multithreaded)
+        run: |
+          python3 runner/run_benchmark.py -m continue_on_error=True iterations=12 \
+          benchmark="glob(*)" target=lf-c problem_size=small \
+          target.params.scheduler=GEDF_NP,NP,adaptive threads=0
+
+      - name: Collect results (multithreaded)
+        run: python3 runner/collect_results.py continuous-benchmarking-results-multi-threaded.json
+
+      - name: Store Benchmark Result (multithreaded)
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Lingua Franca C Benchmark -- Multithreaded
+          tool: customSmallerIsBetter
+          output-file-path: continuous-benchmarking-results-multi-threaded.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          alert-threshold: '200%' # FIXME: After russel is set up, lower the threshold
+          comment-on-alert: true
+          fail-on-alert: false
+
+      - name: Run C Benchmarks (unthreaded)
+        run: |
+          python3 runner/run_benchmark.py -m continue_on_error=True iterations=12 problem_size=small \
+          benchmark="glob(*)" target=lf-c-unthreaded
+
+      - name: Collect results (unthreaded)
+        run: python3 runner/collect_results.py continuous-benchmarking-results-single-threaded.json
+
+      - name: Store Benchmark Result (unthreaded)
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: Lingua Franca C Benchmark -- Single-Threaded
+          tool: customSmallerIsBetter
+          output-file-path: continuous-benchmarking-results-single-threaded.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          alert-threshold: '200%' # FIXME: After russel is set up, lower the threshold
+          comment-on-alert: true
+          fail-on-alert: false
diff --git a/.github/workflows/user-requested-benchmark.yml b/.github/workflows/user-requested-benchmark.yml
new file mode 100644
index 000000000..976f31944
--- /dev/null
+++ b/.github/workflows/user-requested-benchmark.yml
@@ -0,0 +1,90 @@
+name: User-requested benchmarks
+
+on:
+  issue_comment:
+    types: [created]
+  push: # TODO: Remove this when this is in the default branch.
+  workflow_dispatch:
+
+env: # TODO: Do not hard-code these!
+  PR_NUMBER: 91
+  SCHEDULER: NP
+  FEATURE_BRANCH: automated-full-benchmark
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  run_benchmarks:
+    runs-on: ubuntu-latest
+    name: Post benchmark results in a PR
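+    # The benchmark suite is split into three parts; after each part, the visualization
+    # is regenerated over all CSV files produced so far and the PR comment is updated,
+    # with finalize set on the last part so the comment is marked as complete.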
+    steps:
+      - name: Checkout benchmark repository
+        uses: actions/checkout@v2
+        with:
+          repository: lf-lang/benchmarks-lingua-franca
+          token: ${{ secrets.GITHUB_TOKEN }}
+          fetch-depth: 1
+          ref: automated-full-benchmark # FIXME: delete this line after merge
+
+      - name: Checkout benchmarks-lingua-franca
+        uses: actions/checkout@v2
+        with:
+          repository: lf-lang/benchmarks-lingua-franca
+          token: ${{ secrets.PAT_FOR_PUSHING_BENCHMARK_RESULTS }}
+          fetch-depth: 0 # It will be necessary to push to this repo
+          ref: saved-benchmark-results
+          path: benchmarks-lingua-franca
+
+      - name: Set up workspace
+        uses: lf-lang/reactor-c/.github/actions/set-up-workspace@automated-full-benchmark
+
+      - name: Run Benchmarks Part 1
+        id: run-benchmarks-1
+        uses: lf-lang/reactor-c/.github/actions/run-benchmarks-all-runtime-versions@automated-full-benchmark
+        with:
+          scheduler: ${{ env.SCHEDULER }}
+          feature-branch: ${{ env.FEATURE_BRANCH }}
+          num-workers: 1,12
+
+      - name: Visualize, Save, and Upload
+        uses: lf-lang/reactor-c/.github/actions/visualize-save-upload@automated-full-benchmark
+        with:
+          csv-files: ${{ steps.run-benchmarks-1.outputs.output-files }}
+          token: ${{ secrets.PAT_FOR_PUSHING_BENCHMARK_RESULTS }}
+          id: 1
+
+      - name: Run Benchmarks Part 2
+        id: run-benchmarks-2
+        uses: lf-lang/reactor-c/.github/actions/run-benchmarks-all-runtime-versions@automated-full-benchmark
+        with:
+          scheduler: ${{ env.SCHEDULER }}
+          feature-branch: ${{ env.FEATURE_BRANCH }}
+          num-workers: 3,6,24
+
+      - name: Visualize, Save, and Upload
+        uses: lf-lang/reactor-c/.github/actions/visualize-save-upload@automated-full-benchmark
+        with:
+          csv-files: "${{ steps.run-benchmarks-1.outputs.output-files }} ${{ steps.run-benchmarks-2.outputs.output-files }}"
+          token: ${{ secrets.PAT_FOR_PUSHING_BENCHMARK_RESULTS }}
+          id: 2
+
+      - name: Run Benchmarks Part 3
+        id: run-benchmarks-3
+        uses: lf-lang/reactor-c/.github/actions/run-benchmarks-all-runtime-versions@automated-full-benchmark
+        with:
+          scheduler: ${{ env.SCHEDULER }}
+          feature-branch: ${{ env.FEATURE_BRANCH }}
+          num-workers: 2,4,9,18
+
+      - name: Visualize, Save, and Upload
+        uses: lf-lang/reactor-c/.github/actions/visualize-save-upload@automated-full-benchmark
+        with:
+          csv-files: "${{ steps.run-benchmarks-1.outputs.output-files }} ${{ steps.run-benchmarks-2.outputs.output-files }} ${{ steps.run-benchmarks-3.outputs.output-files }}"
+          token: ${{ secrets.PAT_FOR_PUSHING_BENCHMARK_RESULTS }}
+          id: 3
+          finalize: true