diff --git a/.config/mill-version b/.config/mill-version index 17efad1a223..274afb051be 100644 --- a/.config/mill-version +++ b/.config/mill-version @@ -1 +1 @@ -0.12.0-RC1 +0.12.5-68-e4bf78-native \ No newline at end of file diff --git a/.editorconfig b/.editorconfig index 335ea63588f..a683ed60802 100644 --- a/.editorconfig +++ b/.editorconfig @@ -13,3 +13,16 @@ insert_final_newline = true charset = utf-8 indent_style = space indent_size = 2 + +[*.{kt,kts}] +ktlint_code_style = intellij_idea +ktlint_standard_no-wildcard-imports = disabled + +[example/kotlinlib/linting/**/*] +ktlint = disabled + +[kotlinlib/test/resources/contrib/ktfmt/**/*] +ktlint = disabled + +[kotlinlib/test/resources/kotlin-js/foo/test/src/foo/**/*] +ktlint = disabled diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 6fa6e4c0ed7..970ab0049f8 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -49,3 +49,7 @@ ceef92f5d6f3d4066564d41b5165351df7cb5fbb # Reformatted source code 9768c664a9d635d5755260f637e253d9f4c288ad + +# Format more code +39e36746ce8b5a254e0c276bc3cdbff9842d1224 + diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index d75fe1f170a..4bcb0932d64 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -1,4 +1,7 @@ -Please don't open issues for questions, but ask in our Discussions forum at https://github.com/com-lihaoyi/mill/discussions or Discord channel at https://discord.com/channels/632150470000902164/940067748103487558 +Please don't open issues for questions, but ask in our Discussions forum at https://github.com/com-lihaoyi/mill/discussions Mill installations via `coursier` or `cs` are unsupported. +Please open all PRs as drafts to avoid being bottlenecked by Mill CI, and only +convert to ready for review once CI on your own fork is green. 
There will be a +PR status check linking your fork's commit/CI history for convenient viewing \ No newline at end of file diff --git a/.github/actions/post-build-setup/action.yml b/.github/actions/post-build-setup/action.yml new file mode 100644 index 00000000000..d449020d496 --- /dev/null +++ b/.github/actions/post-build-setup/action.yml @@ -0,0 +1,31 @@ +# action.yml +inputs: + java-version: + required: true + type: string + + timeout-minutes: + default: 60 + type: number + + os: + type: string + +runs: + using: "composite" + steps: + - run: echo temurin:${{ inputs.java-version }} > .mill-jvm-version + shell: bash + + # Need to fix cached artifact file permissions because github actions screws it up + # https://github.com/actions/upload-artifact/issues/38 + - run: "chmod -R 777 ." + shell: bash + + - uses: coursier/cache-action@v6 + + - run: git config --global user.email "you@example.com" + shell: bash + + - run: git config --global user.name "Your Name" + shell: bash diff --git a/.github/workflows/draft-ci.yml b/.github/workflows/draft-ci.yml new file mode 100644 index 00000000000..a9b9fcee385 --- /dev/null +++ b/.github/workflows/draft-ci.yml @@ -0,0 +1,34 @@ +name: Draft CI + +permissions: write-all +on: + pull_request_target: + types: + - opened + - reopened + - synchronize + - ready_for_review + +jobs: + run: + runs-on: ubuntu-latest + permissions: write-all + steps: + - name: Debug Echos + run: | + echo ${{ github.event.action }} + echo ${{ github.event.action == 'ready_for_review' }} + echo "${{ github.event.pull_request.head.repo.html_url }}/commits/${{github.event.pull_request.head.ref}}" + - name: Create status + run: | + curl --request POST \ + --url ${{ github.event.pull_request.statuses_url }} \ + --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + --header 'content-type: application/json' \ + --data '{ + "state": "${{(github.event.action != 'ready_for_review' && github.event.pull_request.draft) && 'pending' || 'success'}}", + 
"context": "Draft CI / link", + "target_url": ${{(github.event.action != 'ready_for_review' && github.event.pull_request.draft) && format('"{0}/commits/{1}"', github.event.pull_request.head.repo.html_url, github.event.pull_request.head.ref) || 'null'}}, + "description": "${{(github.event.action != 'ready_for_review' && github.event.pull_request.draft) && 'use CI on your repo fork (link on right) until this PR is ready for review' || 'PR is ready for review, running CI in Mill repo'}}" + }' \ + --fail-with-body diff --git a/.github/workflows/draft-status.yml b/.github/workflows/draft-status.yml deleted file mode 100644 index 9290dbb9a15..00000000000 --- a/.github/workflows/draft-status.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Draft Status - -permissions: write-all -on: - pull_request_target: - -jobs: - set_status: - if: github.event.pull_request.draft == true - runs-on: ubuntu-latest - permissions: write-all - steps: - - name: Get PR source repository URL - run: - echo "${{ github.event.pull_request.head.repo.html_url }}/commits/${{github.event.pull_request.head.ref}}" - - name: Create status - run: | - curl --request POST \ - --url ${{ github.event.pull_request.statuses_url }} \ - --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ - --header 'content-type: application/json' \ - --data '{ - "state": "pending", - "context": "Link to Fork Repo Actions", - "target_url": "${{ github.event.pull_request.head.repo.html_url }}/commits/${{github.event.pull_request.head.ref}}", - "description": "use fork github actions until ready for review", - }' \ - --fail-with-body diff --git a/.github/workflows/post-build-raw.yml b/.github/workflows/post-build-raw.yml new file mode 100644 index 00000000000..c3ea9a8759a --- /dev/null +++ b/.github/workflows/post-build-raw.yml @@ -0,0 +1,34 @@ +name: post-build-raw +on: + workflow_call: + inputs: + buildcmd: + default: '' + type: string + java-version: + required: true + type: string + os: + default: 'ubuntu-latest' + type: 
string + timeout-minutes: + default: 60 + type: number + +jobs: + run: + runs-on: ${{ inputs.os }} + continue-on-error: false + timeout-minutes: ${{ inputs.timeout-minutes }} + steps: + - uses: actions/download-artifact@v4 + with: + path: . + name: ${{ inputs.os }}-artifact + + - uses: ./.github/actions/post-build-setup + with: + java-version: ${{ inputs.java-version }} + os: ${{ inputs.os }} + + - run: ${{ inputs.buildcmd }} diff --git a/.github/workflows/post-build-selective.yml b/.github/workflows/post-build-selective.yml new file mode 100644 index 00000000000..699929ea56d --- /dev/null +++ b/.github/workflows/post-build-selective.yml @@ -0,0 +1,100 @@ +name: post-build-selective +on: + workflow_call: + inputs: + millargs: + default: '' + type: string + coursierarchive: + default: '' + type: string + java-version: + required: true + type: string + os: + default: 'ubuntu-latest' + type: string + timeout-minutes: + default: 60 + type: number + install-android-sdk: + default: false + type: boolean + +jobs: + run: + runs-on: ${{ inputs.os }} + continue-on-error: false + timeout-minutes: ${{ inputs.timeout-minutes }} + steps: + + - uses: actions/download-artifact@v4 + with: + path: . 
+ name: ${{ inputs.os }}-artifact + + - uses: ./.github/actions/post-build-setup + with: + java-version: ${{ inputs.java-version }} + os: ${{ inputs.os }} + + - uses: actions/download-artifact@v4 + with: + path: out/mill-selective-execution + name: ${{ inputs.os }}-selective-execution-artifact + + - run: mv out/mill-selective-execution/mill-selective-execution.json out/mill-selective-execution.json + + - uses: actions/setup-node@v4 + with: + node-version: '22' + + - uses: android-actions/setup-android@v3 + if: ${{ inputs.install-android-sdk }} + with: + log-accepted-android-sdk-licenses: false + cmdline-tools-version: 11076708 + packages: tools platform-tools emulator system-images;android-35;google_apis_playstore;x86_64 + + - name: Enable KVM group perms + if: ${{ inputs.install-android-sdk }} + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm + + - name: Cleanup any previous avd's to avoid signing key conflicts + if : ${{ inputs.install-android-sdk }} + run: rm -rf /home/runner/.config/.android/avd + + - name: Set AVD environment variable globally + if: ${{ inputs.install-android-sdk }} + run: echo "ANDROID_AVD_HOME=/home/runner/.config/.android/avd" >> $GITHUB_ENV + + - run: ./mill -i -k selective.resolve ${{ inputs.millargs }} + + - run: ./mill -i -j1 -k selective.run ${{ inputs.millargs }} + if: ${{ inputs.install-android-sdk }} + env: + COURSIER_ARCHIVE_CACHE: ${{ inputs.coursierarchive }} + + - run: ./mill -i -k selective.run ${{ inputs.millargs }} + if: ${{ !inputs.install-android-sdk }} + env: + COURSIER_ARCHIVE_CACHE: ${{ inputs.coursierarchive }} + + - run: 'taskkill -f -im java* && rm -rf out/mill-server/*' + if: startsWith(inputs.os, 'windows') + shell: bash + continue-on-error: true + + - name: Publish Test Report + uses: mikepenz/action-junit-report@v5 + if: always() # always run even if the 
previous step fails + with: + fail_on_failure: false + include_passed: false + detailed_summary: true + annotate_only: true + require_tests: false + report_paths: 'out/**/test-report.xml' diff --git a/.github/workflows/pre-build.yml b/.github/workflows/pre-build.yml new file mode 100644 index 00000000000..ba41e55506e --- /dev/null +++ b/.github/workflows/pre-build.yml @@ -0,0 +1,61 @@ +on: + workflow_call: + inputs: + compileargs: + default: '__.compile' + type: string + prepareargs: + default: '__.test' + type: string + java-version: + default: '11' + type: string + os: + type: string + timeout-minutes: + default: 60 + type: number + +jobs: + run: + runs-on: ${{ inputs.os }} + timeout-minutes: ${{ inputs.timeout-minutes }} + steps: + - uses: actions/setup-java@v4 + with: + java-version: ${{ inputs.java-version }} + distribution: temurin + + - uses: actions/checkout@v4 + with: + ref: ${{ github.base_ref }} + + - run: echo temurin:${{ inputs.java-version }} > .mill-jvm-version + + - run: chmod -R 777 . # normalize permissions before and after upload/download-artifact + + - run: mkdir out && touch out/mill-selective-execution.json + shell: bash + + - run: cat .mill-jvm-version + + - run: ./mill -i -k selective.prepare ${{ inputs.prepareargs }} + if: ${{ github.event_name == 'pull_request' && !contains(github.event.pull_request.labels.*.name, 'run-all-tests') }} + + - uses: actions/upload-artifact@v4.5.0 + with: + path: out/mill-selective-execution.json + name: ${{ inputs.os }}-selective-execution-artifact + include-hidden-files: true + + - uses: actions/checkout@v4 + + - uses: coursier/cache-action@v6 + + - run: ./mill -i -k ${{ inputs.compileargs }} + + - uses: actions/upload-artifact@v4.5.0 + with: + path: . 
+ name: ${{ inputs.os }}-artifact + include-hidden-files: true diff --git a/.github/workflows/publish-artifacts.yml b/.github/workflows/publish-artifacts.yml index c1d7a71a77e..f0a48a7a29f 100644 --- a/.github/workflows/publish-artifacts.yml +++ b/.github/workflows/publish-artifacts.yml @@ -15,9 +15,32 @@ on: workflow_dispatch: jobs: + build-artifacts: + # when in master repo, publish all tags and manual runs on main + if: github.repository == 'com-lihaoyi/mill' + runs-on: ubuntu-latest + env: + MILL_STABLE_VERSION: 1 + steps: + - uses: actions/checkout@v4 + with: {fetch-depth: 0} + + - uses: coursier/cache-action@v6 + + - run: "echo temurin:11 > .mill-jvm-version" + + - run: ./mill -i __.publishArtifacts + + - uses: actions/upload-artifact@v4.5.0 + with: + path: . + include-hidden-files: true + name: publish-artifacts + publish-sonatype: # when in master repo, publish all tags and manual runs on main if: github.repository == 'com-lihaoyi/mill' + needs: build-artifacts runs-on: ubuntu-latest # only run one publish job for the same sha at the same time @@ -25,6 +48,7 @@ jobs: concurrency: publish-sonatype-${{ github.sha }} env: + MILL_STABLE_VERSION: 1 MILL_SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} MILL_SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} MILL_PGP_SECRET_BASE64: ${{ secrets.SONATYPE_PGP_SECRET }} @@ -34,17 +58,58 @@ jobs: LC_ALL: "en_US.UTF-8" steps: - - uses: actions/checkout@v4 - with: {fetch-depth: 0} + - uses: actions/download-artifact@v4 + with: + path: . + name: publish-artifacts + + - run: ls -la . 
+ + # Need to fix cached artifact file permissions because github actions screws it up + # https://github.com/actions/upload-artifact/issues/38 + # Best is, we restore any file to avoid any changes + - run: git reset --hard - uses: coursier/cache-action@v6 - - uses: actions/setup-java@v4 - with: - java-version: '11' - distribution: temurin + - run: "echo temurin:11 > .mill-jvm-version" + + - run: ./mill -i mill.scalalib.PublishModule/ + + publish-sonatype-native: + # when in master repo, publish all tags and manual runs on main + if: github.repository == 'com-lihaoyi/mill' + runs-on: ${{ matrix.os }} + + # only run one publish job for the same sha at the same time + # e.g. when a main-branch push is also tagged + concurrency: publish-sonatype-native-${{ matrix.os }}-${{ github.sha }} + strategy: + matrix: + include: + - os: macos-latest + coursierarchive: "" + - os: windows-latest + coursierarchive: C:/coursier-arc + # Skip this because the main publishing job handles it + # - os: ubuntu-latest + env: + MILL_STABLE_VERSION: 1 + MILL_SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }} + MILL_SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }} + MILL_PGP_SECRET_BASE64: ${{ secrets.SONATYPE_PGP_SECRET }} + MILL_PGP_PASSPHRASE: ${{ secrets.SONATYPE_PGP_PASSWORD }} + LANG: "en_US.UTF-8" + LC_MESSAGES: "en_US.UTF-8" + LC_ALL: "en_US.UTF-8" + COURSIER_ARCHIVE_CACHE: ${{ matrix.coursierarchive }} + steps: + - uses: actions/checkout@v4 + with: { fetch-depth: 0 } + + - run: "echo temurin:11 > .mill-jvm-version" - - run: ci/release-maven.sh + - run: ./mill -i mill.scalalib.PublishModule/ --publishArtifacts dist.native.publishArtifacts release-github: # when in master repo, publish all tags and manual runs on main @@ -53,17 +118,24 @@ jobs: runs-on: ubuntu-latest env: + MILL_STABLE_VERSION: 1 REPO_ACCESS_TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }} steps: - - uses: actions/checkout@v4 - with: {fetch-depth: 0} + - uses: actions/download-artifact@v4 + with: + path: . 
+ name: publish-artifacts + + - run: ls -la . + + # Need to fix cached artifact file permissions because github actions screws it up + # https://github.com/actions/upload-artifact/issues/38 + # Best is, we restore any file to avoid any changes + - run: git reset --hard - uses: coursier/cache-action@v6 - - uses: actions/setup-java@v4 - with: - java-version: '11' - distribution: temurin + - run: "echo temurin:11 > .mill-jvm-version" - - run: ./mill -i uploadToGithub --authKey $REPO_ACCESS_TOKEN + - run: ./mill -i dist.uploadToGithub --authKey $REPO_ACCESS_TOKEN diff --git a/.github/workflows/publish-bridges.yml b/.github/workflows/publish-bridges.yml deleted file mode 100644 index 27e44d848aa..00000000000 --- a/.github/workflows/publish-bridges.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Publish Bridges - -# Manually-triggered github action to publish mill-scala-compiler-bridges jars, -# since those do not change frequently enough to be worth including in the main -# publishing workflow that runs every Mill version -on: - workflow_dispatch: - inputs: - bridge_versions: - description: 'comma-separated list of Scala versions to publish or `all` for all supported versions' - required: true - type: string - -jobs: - publish-bridges: - runs-on: ubuntu-latest - - concurrency: publish-sonatype-${{ github.sha }} - - env: - SONATYPE_PGP_SECRET: ${{ secrets.SONATYPE_PGP_SECRET }} - SONATYPE_USERNAME: ${{ secrets.SONATYPE_DEPLOY_USER }} - SONATYPE_PASSWORD: ${{ secrets.SONATYPE_DEPLOY_PASSWORD }} - SONATYPE_PGP_PASSWORD: ${{ secrets.SONATYPE_PGP_PASSWORD }} - LANG: "en_US.UTF-8" - LC_MESSAGES: "en_US.UTF-8" - LC_ALL: "en_US.UTF-8" - MILL_COMPILER_BRIDGE_VERSIONS: ${{ inputs.bridge_versions }} - - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - uses: actions/setup-java@v4 - with: - java-version: '11' - distribution: temurin - - - run: ci/release-bridge-maven.sh diff --git a/.github/workflows/publish-docs.yml b/.github/workflows/publish-docs.yml index 
3cfa5bb668e..da24030d66a 100644 --- a/.github/workflows/publish-docs.yml +++ b/.github/workflows/publish-docs.yml @@ -7,6 +7,12 @@ on: push: branches: - main + +# Ensure only one job run is publishing docs at any point in time. We can cancel +# old runs since any newer run renders older ones irrelevant +concurrency: + group: publish-docs + jobs: publishDocs: if: github.repository == 'com-lihaoyi/mill' @@ -20,9 +26,6 @@ jobs: - uses: coursier/cache-action@v6 - - uses: actions/setup-java@v4 - with: - java-version: '11' - distribution: temurin + - run: "echo temurin:11 > .mill-jvm-version" - run: ci/publish-docs.sh diff --git a/.github/workflows/run-mill-action.yml b/.github/workflows/run-mill-action.yml deleted file mode 100644 index 9d667968e14..00000000000 --- a/.github/workflows/run-mill-action.yml +++ /dev/null @@ -1,90 +0,0 @@ -name: Build with Mill - -on: - workflow_call: - inputs: - buildcmd: - default: '' - type: string - millargs: - default: '' - type: string - java-version: - required: true - type: string - os: - default: 'ubuntu-latest' - type: string - continue-on-error: - default: false - type: boolean - populate_cache: - default: false - type: boolean - timeout-minutes: - default: 60 - type: number - env-bridge-versions: - default: 'none' - type: string - -jobs: - run: - runs-on: ${{ inputs.os }} - continue-on-error: ${{ inputs.continue-on-error }} - timeout-minutes: ${{ inputs.timeout-minutes }} - env: - MILL_COMPILER_BRIDGE_VERSIONS: ${{ inputs.env-bridge-versions }} - - steps: - - uses: actions/checkout@v4 - if: ${{ inputs.populate_cache }} - - - uses: actions/download-artifact@v4 - if: ${{ !inputs.populate_cache }} - with: - path: . - name: ${{ inputs.os }}-artifact - - # Need to fix cached artifact file permissions because github actions screws it up - # https://github.com/actions/upload-artifact/issues/38 - - name: chmod executable - run: "chmod -R +x ." 
- - - uses: coursier/cache-action@v6 - - - uses: actions/setup-java@v4 - with: - java-version: ${{ inputs.java-version }} - distribution: temurin - - - name: Prepare git config - run: | - git config --global user.name "Mill GithHub Actions" - git config --global user.email "mill-ci@localhost" - - - name: Run '${{ inputs.buildcmd }}' - run: ${{ inputs.buildcmd }} - if: inputs.buildcmd != '' - - - name: Run Mill '${{ inputs.millargs }}' - # Mill tests are pretty heavy so run them only 3x parallel on 4 core Github Actions runners - run: ./mill -i -j3 -k ${{ inputs.millargs }} - if: inputs.millargs != '' && !startsWith(inputs.os, 'windows') - - - name: Run Mill (on Windows) '${{ inputs.millargs }}' - run: cmd /C %GITHUB_WORKSPACE%\ci\mill.bat -i -j3 -k ${{ inputs.millargs }} - if: inputs.millargs != '' && startsWith(inputs.os, 'windows') - - - name: Run Mill (on Windows) Worker Cleanup - run: 'taskkill -f -im java* && rm -rf out/mill-worker-*' - if: inputs.millargs != '' && startsWith(inputs.os, 'windows') - shell: bash - continue-on-error: true - - - uses: actions/upload-artifact@v4.3.5 - with: - path: . 
- name: ${{ inputs.os }}-artifact - include-hidden-files: true - if: ${{ inputs.populate_cache }} \ No newline at end of file diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 1165cfa3093..429e075af00 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -1,13 +1,33 @@ +# Uncommment this to replace the rest of the file when you want to debug stuff in CI + +# +#name: Run Debug +# +#on: +# push: +# pull_request: +# workflow_dispatch: +# +#jobs: +# debug-windows: +## runs-on: ubuntu-latest +# runs-on: windows-latest +# steps: +# - uses: actions/checkout@v4 +# with: { fetch-depth: 1 } +# +# - run: ./mill 'example.scalalib.basic[1-simple].packaged.fork.test' +# env: +# COURSIER_ARCHIVE_CACHE: "C:/coursier-arc" + + name: Run Tests # We run full CI on push builds to main and on all pull requests # -# Manual builds (workflow_dispatch) to the main branch are also published -# -# To maximize bug-catching changes while keeping CI times reasonable, we run: -# - All tests on Linux/Java17 -# - Fewer tests on Linux/Java11 and Windows/Java17 -# - Fewest tests on Windows/Java11 +# To maximize bug-catching changes while keeping CI times reasonable, we run +# all tests on Linux, scattered between Java 11/17, except for one job run +# on MacOS instead and a subset of jobs also run on windows on: push: @@ -29,30 +49,39 @@ jobs: # Jobs are listed in rough order of priority: if multiple jobs fail, the first job # in the list should be the one that's most worth looking into build-linux: - if: github.event.pull_request.draft == false - uses: ./.github/workflows/run-mill-action.yml + if: (github.event.action == 'ready_for_review') || (github.event.pull_request.draft == false) + uses: ./.github/workflows/pre-build.yml with: - java-version: '11' - millargs: __.compile - populate_cache: true + os: ubuntu-latest build-windows: - if: github.event.pull_request.draft == false - uses: ./.github/workflows/run-mill-action.yml + if: 
(github.event.action == 'ready_for_review') || (github.event.pull_request.draft == false) + uses: ./.github/workflows/pre-build.yml with: os: windows-latest - java-version: '11' - millargs: __.compile - populate_cache: true test-docs: - if: github.event.pull_request.draft == false + if: (github.event.action == 'ready_for_review') || (github.event.pull_request.draft == false) runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 with: { fetch-depth: 0 } - - run: ./mill -i docs.githubPages + - run: ./mill -i docs.githubPages + docs.checkBrokenLinks + + mac: + runs-on: macos-latest + steps: + - uses: actions/checkout@v4 + with: { fetch-depth: 0 } + + - run: "echo temurin:17 > .mill-jvm-version" + + - uses: actions/setup-node@v4 + with: + node-version: '22' + + - run: ./mill -i example.scalalib.__.local.server.test linux: needs: build-linux @@ -64,39 +93,68 @@ jobs: # For most tests, run them arbitrarily on Java 11 or Java 17 on Linux, and # on the opposite version on Windows below, so we get decent coverage of # each test on each Java version and each operating system - # We also try to group tests together to manuaully balance out the runtimes of each jobs + # We also try to group tests together to manually balance out the runtimes of each jobs + - java-version: 17 + millargs: "'{main,scalalib,testrunner,bsp,testkit}.__.test'" + install-android-sdk: false + + - java-version: 11 + millargs: "'{scalajslib,scalanativelib,kotlinlib,pythonlib,javascriptlib}.__.test'" + install-android-sdk: false + - java-version: 17 - millargs: "'{main,scalalib,testrunner,bsp,testkit}.__.testCached'" - - java-version: '11' - millargs: "'{scalajslib,scalanativelib}.__.testCached'" + millargs: "contrib.__.test" + install-android-sdk: false + + - java-version: 17 # Run this with Mill native launcher as a smoketest + millargs: "'example.javalib.__.native.server.test'" + install-android-sdk: false + - java-version: 17 - millargs: "contrib.__.testCached" + millargs: 
"'example.kotlinlib.__.local.server.test'" + install-android-sdk: false - java-version: 17 - millargs: "'example.javalib.__.local.testCached'" + millargs: "'example.android.__.local.server.test'" + install-android-sdk: true + - java-version: 17 - millargs: "'example.scalalib.__.local.testCached'" - - java-version: '11' - millargs: "'example.thirdparty[{mockito,acyclic,commons-io}].local.testCached'" + millargs: "'example.{pythonlib,javascriptlib}.__.local.server.test'" + install-android-sdk: false + + - java-version: 11 + millargs: "'example.thirdparty[{mockito,acyclic,commons-io}].local.server.test'" + install-android-sdk: false + - java-version: 17 - millargs: "'example.thirdparty[{fansi,jimfs,netty,gatling}].local.testCached'" - - java-version: '11' - millargs: "'example.{depth,extending}.__.local.testCached'" + millargs: "'example.thirdparty[{fansi,jimfs,netty,gatling}].local.server.test'" + install-android-sdk: false + + - java-version: '17' + millargs: "'example.thirdparty[arrow].local.server.test'" + install-android-sdk: false + - java-version: 11 + millargs: "'example.{cli,fundamentals,depth,extending}.__.local.server.test'" + install-android-sdk: false # Most of these integration tests should not depend on which mode they # are run in, so just run them in `local` - - java-version: '11' - millargs: "'integration.{failure,feature,ide}.__.local.testCached'" - + - java-version: '17' + millargs: "'integration.{failure,feature,ide}.__.packaged.server.test'" + install-android-sdk: false # These invalidation tests need to be exercised in both execution modes # to make sure they work with and without -i/--no-server being passed - java-version: 17 - millargs: "'integration.invalidation.__.fork.testCached'" + millargs: "'integration.invalidation.__.packaged.fork.test'" + install-android-sdk: false + - java-version: 17 - millargs: "'integration.invalidation.__.server.testCached'" + millargs: "'integration.invalidation.__.packaged.server.test'" + install-android-sdk: 
false - uses: ./.github/workflows/run-mill-action.yml + uses: ./.github/workflows/post-build-selective.yml with: + install-android-sdk: ${{ matrix.install-android-sdk }} java-version: ${{ matrix.java-version }} millargs: ${{ matrix.millargs }} @@ -108,22 +166,30 @@ jobs: include: # just run a subset of examples/ on Windows, because for some reason running # the whole suite can take hours on windows v.s. half an hour on linux - - java-version: '11' - millargs: '"{main,scalalib,bsp}.__.testCached"' - - java-version: '11' - millargs: '"example.scalalib.{basic,web}.__.fork.testCached"' + # + # * One job unit tests, + # * One job each for local/packaged/native tests + # * At least one job for each of fork/server tests, and example/integration tests + - java-version: 11 + millargs: '"{main,scalalib,bsp}.__.test"' + + - java-version: 11 + millargs: '"example.scalalib.{basic,publishing}.__.local.fork.test"' + - java-version: 17 - millargs: "'integration.{feature,failure}[_].fork.testCached'" - - java-version: '11' - millargs: "'integration.invalidation[_].server.testCached'" - - java-version: '11' - millargs: "contrib.__.testCached" + millargs: "'integration.{feature,failure}.__.packaged.fork.test'" - uses: ./.github/workflows/run-mill-action.yml + - java-version: 11 # Run this with Mill native launcher as a smoketest + millargs: "'integration.invalidation.__.native.server.test'" + + uses: ./.github/workflows/post-build-selective.yml with: os: windows-latest java-version: ${{ matrix.java-version }} millargs: ${{ matrix.millargs }} + # Provide a shorter coursier archive folder to avoid hitting path-length bugs when + # running the graal native image binary on windows + coursierarchive: "C:/coursier-arc" itest: needs: build-linux @@ -132,30 +198,21 @@ jobs: matrix: include: # bootstrap tests - - java-version: '11' # Have one job on oldest JVM + - java-version: 11 # Have one job on oldest JVM buildcmd: ci/test-mill-dev.sh && ci/test-mill-release.sh && ./mill -i -k 
__.ivyDepsTree && ./mill -i -k __.ivyDepsTree --withRuntime - java-version: 17 # Have one job on default JVM buildcmd: ci/test-mill-bootstrap.sh - uses: ./.github/workflows/run-mill-action.yml + uses: ./.github/workflows/post-build-raw.yml with: java-version: ${{ matrix.java-version }} buildcmd: ${{ matrix.buildcmd }} - # Rarely breaks so run it near the end - compiler-bridge: - needs: build-linux - uses: ./.github/workflows/run-mill-action.yml - with: - java-version: '11' - millargs: bridge.__.publishLocal - env-bridge-versions: 'essential' - # Scalafmt, Mima, and Scalafix job runs last because it's the least important: - # usually just a automated or mechanical manual fix to do before merging + # usually just an automated or mechanical manual fix to do before merging lint-autofix: needs: build-linux - uses: ./.github/workflows/run-mill-action.yml + uses: ./.github/workflows/post-build-raw.yml with: - java-version: '11' - buildcmd: ./mill -i mill.scalalib.scalafmt.ScalafmtModule/checkFormatAll __.sources + __.mimaReportBinaryIssues + __.fix --check + java-version: '17' + buildcmd: ./mill -i mill.scalalib.scalafmt.ScalafmtModule/checkFormatAll __.sources + __.mimaReportBinaryIssues + __.fix --check + mill.javalib.palantirformat.PalantirFormatModule/ --check + mill.kotlinlib.ktlint.KtlintModule/checkFormatAll diff --git a/.github/workflows/scala-steward.yml b/.github/workflows/scala-steward.yml new file mode 100644 index 00000000000..358d4ecadbd --- /dev/null +++ b/.github/workflows/scala-steward.yml @@ -0,0 +1,24 @@ +on: +# schedule: +# - cron: '0 0 * * *' # daily + workflow_dispatch: + +name: Scala Steward + +permissions: + contents: write + pull-requests: write + +jobs: + scala-steward: + runs-on: ubuntu-latest + + steps: + - uses: actions/setup-java@v4 + with: + java-version: '17' + distribution: 'temurin' + + - uses: scala-steward-org/scala-steward-action@v2.72.0 + with: + mill-version: 0.12.1 diff --git a/.gitignore b/.gitignore index 3bb56edddb3..fdcdd3f8ab7 
100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,9 @@ mill.iml bsp.log lowered.hnir .dotty-ide* +node_modules/ +dist/ +build/ +*.bak +mill-assembly.jar +mill-native \ No newline at end of file diff --git a/.mill-jvm-version b/.mill-jvm-version new file mode 100644 index 00000000000..53e9a0785b0 --- /dev/null +++ b/.mill-jvm-version @@ -0,0 +1 @@ +zulu:17.0.13 \ No newline at end of file diff --git a/.mill-opts b/.mill-opts new file mode 100644 index 00000000000..be51e1c2201 --- /dev/null +++ b/.mill-opts @@ -0,0 +1 @@ +--jobs=0.5C \ No newline at end of file diff --git a/.scalafmt.conf b/.scalafmt.conf index 94752dfaa01..ca282ea5224 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,5 +1,4 @@ -# Newer versions won't work with Java 8! -version = "3.7.15" +version = "3.8.4-RC1" align.openParenCallSite = false align.preset = none @@ -22,3 +21,19 @@ project.git = true runner.dialect = scala213 +project { + excludePaths = [ + "glob:**/example/scalalib/linting/1-scalafmt/src/Foo.scala", + "glob:**/mill/out/**" + ] +} + +fileOverride { + "glob:**/example/**/build.mill*" { + docstrings.style = keep + } + "glob:**/example/**/build.sc" { + docstrings.style = keep + } + "glob:**/example/scalalib/native/**/*.scala" = scala3 +} \ No newline at end of file diff --git a/CONTRIBUTING.adoc b/CONTRIBUTING.adoc index acfa341accb..66a0c2f2a29 100644 --- a/CONTRIBUTING.adoc +++ b/CONTRIBUTING.adoc @@ -1,6 +1,6 @@ = Contributing to Mill :link-github: https://github.com/com-lihaoyi/mill -:link-chat: https://discord.com/channels/632150470000902164/940067748103487558 + Thank you for considering contributing to Mill. 
@@ -16,6 +16,6 @@ Here are some direct links: * {link-github}/discussions[Discussion Forum on GitHub] - A place to ask question and discuss all kind of questions around Mill * {link-github}/issues[Issue Tracker on GitHub] - Our issue tracker for bugs and features * {link-github}/pulls[Pull Requests on GitHub] - All new features and bug fixes find their way into Mill via a pull request. You can also sketch new ideas by creating a draft pull requests. -{link-chat}[Discord Chat] - You can also join our chat room if you like more direct communication or to just say hello -To build docs locally, `mill docs.localPages`. The last line of the command tells you where to browse the generated pages. From there you can follow the breadcrumbs. \ No newline at end of file + +To build docs locally, `mill docs.localPages`. The last line of the command tells you where to browse the generated pages. From there you can follow the breadcrumbs. diff --git a/blog/antora.yml b/blog/antora.yml new file mode 100644 index 00000000000..00be394c59e --- /dev/null +++ b/blog/antora.yml @@ -0,0 +1,8 @@ +name: blog +title: The Mill Build Engineering Blog +version: ~ +nav: + - modules/ROOT/nav.adoc +asciidoc: + attributes: + mill-version: dummy-mill-version diff --git a/blog/modules/ROOT/attachments/GC.java b/blog/modules/ROOT/attachments/GC.java new file mode 100644 index 00000000000..1cb1e429dec --- /dev/null +++ b/blog/modules/ROOT/attachments/GC.java @@ -0,0 +1,52 @@ +public class GC { + public static void main(String[] args) throws Exception{ + final long liveSetByteSize = Integer.parseInt(args[0]) * 1000000L; + final int benchMillis = Integer.parseInt(args[1]); + final int benchCount = Integer.parseInt(args[2]); + // 0-490 array entries per object, * 4-bytes per entry, + // + 20 byte array header = average 1000 bytes per entry + final int maxObjectSize = 490; + final int averageObjectSize = (maxObjectSize / 2) * 4 + 20; + + final int liveSetSize = (int)(liveSetByteSize / averageObjectSize); 
+ + long maxPauseTotal = 0; + long throughputTotal = 0; + + for(int i = 0; i < benchCount + 1; i++) { + int chunkSize = 256; + Object[] liveSet = new Object[liveSetSize]; + for(int j = 0; j < liveSetSize; j++) liveSet[j] = new int[j % maxObjectSize]; + System.gc(); + long maxPause = 0; + long startTime = System.currentTimeMillis(); + + long loopCount = 0; + java.util.Random random = new java.util.Random(1337); + int liveSetIndex = 0; + + while (startTime + benchMillis > System.currentTimeMillis()) { + if (loopCount % liveSetSize == 0) Thread.sleep(1); + long loopStartTime = System.currentTimeMillis(); + liveSetIndex = random.nextInt(liveSetSize); + liveSet[liveSetIndex] = new int[liveSetIndex % maxObjectSize]; + long loopTime = System.currentTimeMillis() - loopStartTime; + if (loopTime > maxPause) maxPause = loopTime; + loopCount++; + } + if (i != 0) { + long benchEndTime = System.currentTimeMillis(); + long bytesPerLoop = maxObjectSize / 2 * 4 + 20; + throughputTotal += (long) (1.0 * loopCount * bytesPerLoop / 1000000 / (benchEndTime - startTime) * averageObjectSize); + maxPauseTotal += maxPause; + } + + System.out.println(liveSet[random.nextInt(liveSet.length)]); + } + + long maxPause = maxPauseTotal / benchCount; + long throughput = throughputTotal / benchCount; + + System.out.println("longest-gc: " + maxPause + " ms, throughput: " + throughput + " mb/s"); + } +} diff --git a/blog/modules/ROOT/attachments/GCBenchmark.java b/blog/modules/ROOT/attachments/GCBenchmark.java new file mode 100644 index 00000000000..d340d6d39db --- /dev/null +++ b/blog/modules/ROOT/attachments/GCBenchmark.java @@ -0,0 +1,78 @@ +package mill.main.client; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +public class GCBenchmark { + public static void main(String[] args) throws Exception { + String[][] javaGcCombinations = { {"23", "G1"}, {"23", "Z"} }; + + for (String[] combination : javaGcCombinations) { + String javaVersion = combination[0]; + 
String gc = combination[1]; + + System.out.println("Benchmarking javaVersion=" + javaVersion + " gc=" + gc); + + int[] liveSets = {400, 800, 1600, 3200, 6400}; + int[] heapSizes = {800, 1600, 3200, 6400, 12800}; + + List<List<String[]>> lists = new ArrayList<>(); + + for (int liveSet : liveSets) { + List<String[]> innerList = new ArrayList<>(); + + for (int heapSize : heapSizes) { + if (liveSet >= heapSize) innerList.add(new String[]{"", ""}); + else innerList.add(runBench(liveSet, heapSize, javaVersion, gc)); + } + + lists.add(innerList); + } + + renderTable(liveSets, heapSizes, lists, 0); + renderTable(liveSets, heapSizes, lists, 1); + } + } + + static String[] runBench(int liveSet, int heapSize, String javaVersion, String gc) throws Exception { + System.out.println("Benchmarking liveSet=" + liveSet + " heapSize=" + heapSize); + + String javaBin = "/Users/lihaoyi/Downloads/amazon-corretto-" + javaVersion + ".jdk/Contents/Home/bin/java"; + + ProcessBuilder processBuilder = new ProcessBuilder( + javaBin, "-Xmx" + heapSize + "m", "-XX:+Use" + gc + "GC", "GC.java", "" + liveSet, "10000", "5" + ); + + Process process = processBuilder.start(); + process.waitFor(); + + + List<String> outputLines = + new String(process.getInputStream().readAllBytes()).lines().toList(); + + Optional<String[]> result = outputLines.stream() + .filter(line -> line.startsWith("longest-gc: ")) + .map(line -> { + String[] parts = line.split(", throughput: "); + return new String[]{ + parts[0].split(": ")[1].trim(), + parts[1].trim() + }; + }) + .findFirst(); + + return result.orElse(new String[]{"error", "error"}); + } + + static void renderTable(int[] liveSets, int[] heapSizes, List<List<String[]>> lists, int columnIndex) { + StringBuilder header = new StringBuilder("| live-set\\heap-size | "); + for (int heapSize : heapSizes) header.append(heapSize).append(" mb | "); + System.out.println(header); + for (int i = 0; i < liveSets.length; i++) { + StringBuilder row = new StringBuilder("| ").append(liveSets[i]).append(" mb | "); + for (String[] pair : 
lists.get(i)) row.append(pair[columnIndex]).append(" | "); + System.out.println(row); + } + } +} diff --git a/blog/modules/ROOT/images/G1GC.png b/blog/modules/ROOT/images/G1GC.png new file mode 100644 index 00000000000..5ea6bd1b379 Binary files /dev/null and b/blog/modules/ROOT/images/G1GC.png differ diff --git a/blog/modules/ROOT/images/ParallelGC.png b/blog/modules/ROOT/images/ParallelGC.png new file mode 100644 index 00000000000..f7f6bcce05f Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGC.png differ diff --git a/blog/modules/ROOT/images/ParallelGC12800.png b/blog/modules/ROOT/images/ParallelGC12800.png new file mode 100644 index 00000000000..e181c10df14 Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGC12800.png differ diff --git a/blog/modules/ROOT/images/ParallelGC200.png b/blog/modules/ROOT/images/ParallelGC200.png new file mode 100644 index 00000000000..436aa818f82 Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGC200.png differ diff --git a/blog/modules/ROOT/images/ParallelGC3200.png b/blog/modules/ROOT/images/ParallelGC3200.png new file mode 100644 index 00000000000..1af9259a447 Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGC3200.png differ diff --git a/blog/modules/ROOT/images/ParallelGC800.png b/blog/modules/ROOT/images/ParallelGC800.png new file mode 100644 index 00000000000..f1850f0600e Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGC800.png differ diff --git a/blog/modules/ROOT/images/ParallelGenerational.png b/blog/modules/ROOT/images/ParallelGenerational.png new file mode 100644 index 00000000000..5825afd3854 Binary files /dev/null and b/blog/modules/ROOT/images/ParallelGenerational.png differ diff --git a/blog/modules/ROOT/images/ParallelNonGenerational.png b/blog/modules/ROOT/images/ParallelNonGenerational.png new file mode 100644 index 00000000000..a94bf5fd269 Binary files /dev/null and b/blog/modules/ROOT/images/ParallelNonGenerational.png differ diff --git 
a/blog/modules/ROOT/nav.adoc b/blog/modules/ROOT/nav.adoc new file mode 100644 index 00000000000..00f75cfd990 --- /dev/null +++ b/blog/modules/ROOT/nav.adoc @@ -0,0 +1,7 @@ + +* xref:6-garbage-collector-perf.adoc[] +* xref:5-executable-jars.adoc[] +* xref:4-flaky-tests.adoc[] +* xref:3-selective-testing.adoc[] +* xref:2-monorepo-build-tool.adoc[] +* xref:1-java-compile.adoc[] diff --git a/blog/modules/ROOT/pages/1-java-compile.adoc b/blog/modules/ROOT/pages/1-java-compile.adoc new file mode 100644 index 00000000000..d73c2bedd63 --- /dev/null +++ b/blog/modules/ROOT/pages/1-java-compile.adoc @@ -0,0 +1,359 @@ +// tag::header[] + +# How Fast Does Java Compile? +:page-aliases: xref:mill:ROOT:comparisons/java-compile.adoc + +:author: Li Haoyi +:revdate: 29 November 2024 +_{author}, {revdate}_ + +include::mill:ROOT:partial$gtag-config.adoc[] + +Java compiles have the reputation for being slow, but that reputation does +not match today's reality. Nowadays the Java compiler can compile "typical" Java code at over +100,000 lines a second on a single core. 
That means that even a million line project +should take more than 10s to compile in a single-threaded fashion, and should be even +faster in the presence of parallelism + +// end::header[] + + +Doing some ad-hoc benchmarks, we find that although the compiler is blazing fast, all +build tools add significant overhead over compiling Java directly: + +|=== +| *Mockito Core* | *Time* | *Compiler lines/s* | *Slowdown* | *Netty Common* | *Time* | *Compiler lines/s* | *Slowdown* +| *Javac Hot* | 0.36s | 115,600 | 1.0x | *Javac Hot* | 0.29s | 102,500 | 1.0x +| *Javac Cold* | 1.29s | 32,200 | 4.4x | *Javac Cold* | 1.62s | 18,300 | 5.6x +| *Mill* | 1.20s | 34,700 | 4.1x | *Mill* | 1.11s | 26,800 | 3.8x +| *Gradle* | 4.41s | 9,400 | 15.2x | *Maven* | 4.89s | 6,100 | 16.9x +|=== + +Although Mill does the best in these benchmarks among the build tools (Maven, Gradle, and Mill), +all build tools fall short of how fast compiling Java _should_ be. This post explores how +these numbers were arrived at, and what that means in un-tapped potential for Java build +tooling to become truly great. + +## Mockito Core + +To begin to understand the problem, lets consider the codebase of the popular Mockito project: + +* https://github.com/mockito/mockito + +Mockito is a medium-sized Java project with a few dozen sub-modules and about ~100,000 lines +of code. To give us a simple reproducible scenario, let's consider the root mockito module +with sources in `src/main/java/`, on which all the downstream module and tests depend on. + +Mockito is built using Gradle. It's not totally trivial to extract the compilation classpath +from Gradle, but the following stackoverflow answer gives us some tips: + +* https://stackoverflow.com/a/50639444/871202[How do I print out the Java classpath in gradle?] 
+ +```bash +> ./gradlew clean && ./gradlew :classes --no-build-cache --debug | grep "classpath " +``` + +This gives us the following classpath: + +``` +export MY_CLASSPATH=/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/net.bytebuddy/byte-buddy/1.14.18/81e9b9a20944626e6757b5950676af901c2485/byte-buddy-1.14.18.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/net.bytebuddy/byte-buddy-agent/1.14.18/417558ea01fe9f0e8a94af28b9469d281c4e3984/byte-buddy-agent-1.14.18.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/junit/junit/4.13.2/8ac9e16d933b6fb43bc7f576336b8f4d7eb5ba12/junit-4.13.2.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/org.hamcrest/hamcrest-core/2.2/3f2bd07716a31c395e2837254f37f21f0f0ab24b/hamcrest-core-2.2.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/org.opentest4j/opentest4j/1.3.0/152ea56b3a72f655d4fd677fc0ef2596c3dd5e6e/opentest4j-1.3.0.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/org.objenesis/objenesis/3.3/1049c09f1de4331e8193e579448d0916d75b7631/objenesis-3.3.jar:/Users/lihaoyi/.gradle/caches/modules-2/files-2.1/org.hamcrest/hamcrest/2.2/1820c0968dba3a11a1b30669bb1f01978a91dedc/hamcrest-2.2.jar +``` + +Note that for this benchmark, all third-party dependencies have already been resolved +and downloaded from Maven Central. We can thus simply reference the jars on disk directly, +which we do above. + +We can then pass this classpath into `javac -cp`, together with `src/main/java/**/*.java`, +to perform the compilation outside of Gradle using `javac` directly. Running this a few +times gives us the timings below: + +```bash +> time javac -cp $MY_CLASSPATH src/main/java/**/*.java +1.290s +1.250s +1.293s +``` + +To give us an idea of how many lines of code we are compiling, we can run: + +```bash +> find src/main/java | grep \\.java | xargs wc -l +... 
+41601 total +``` + +Combining this information, we find that 41601 lines of code compiled in ~1.29 seconds +(taking the median of the three runs above) suggests that `javac` compiles about ~32,000 +lines of code per second. + +These benchmarks were run ad-hoc on my laptop, an M1 10-core Macbook Pro, with OpenJDK +Corretto 17.0.6. The numbers would differ on different Java versions, hardware, operating systems, +and filesystems. Nevertheless, the overall trend is strong enough that you should be +able to reproduce the results despite variations in the benchmarking environment. + +Compiling 32,000 lines of code per second is not bad. But it is nowhere near how fast the +Java compiler _can_ run. Any software engineer with JVM experience would know the next +obvious optimization for us to explore. + +## Keeping the JVM Hot + +One issue with the above benchmark is that it uses `javac` as a sub-process. The Java +compiler runs on the Java Virtual Machine, and like any JVM application, it has a slow +startup time, takes time warming-up, but then has good steady-state performance. +Running `javac` from the command line and compiling ~32,000 lines/sec is thus the _worst_ +possible performance you could get out of the Java compiler on this Java codebase. + +To get good performance out of `javac`, like any other JVM application, we need to keep it +long-lived so it has a chance to warm up. While running the `javac` in a long-lived Java +program is not commonly taught, neither is it particularly difficult. Here is a complete +`Bench.java` file that does this, repeatedly running java compilation in a loop where it +has a chance to warm up, to emulate the long lived JVM process that a build tool like Mill +may spawn and manage.
We use the same `MY_CLASSPATH` and source files we saw earlier and +print the output statistics to the terminal so we can see how fast Java compilation can +occur once things have a chance to warm up: + +```java +// Bench.java +import javax.tools.*; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.file.*; +import java.util.List; +import java.util.stream.Collectors; + +public class Bench { + public static void main(String[] args) throws Exception { + while (true) { + long now = System.currentTimeMillis(); + String classpath = System.getenv("MY_CLASSPATH"); + Path sourceFolder = Paths.get("src/main/java"); + + List<SimpleJavaFileObject> files = Files.walk(sourceFolder) + .filter(p -> p.toString().endsWith(".java")) + .map(p -> + new SimpleJavaFileObject(p.toUri(), JavaFileObject.Kind.SOURCE) { + public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException { + return Files.readString(p); + } + } + ) + .collect(Collectors.toList()); + + JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); + + StandardJavaFileManager fileManager = compiler + .getStandardFileManager(null, null, null); + + // Run the compiler + JavaCompiler.CompilationTask task = compiler.getTask( + new OutputStreamWriter(System.out), + fileManager, + null, + List.of("-classpath", classpath), + null, + files + ); + + System.out.println("Compile Result: " + task.call()); + long end = System.currentTimeMillis(); + long lineCount = Files.walk(sourceFolder) + .filter(p -> p.toString().endsWith(".java")) + .map(p -> { + try { return Files.readAllLines(p).size(); } + catch(Exception e){ throw new RuntimeException(e); } + }) + .reduce(0, (x, y) -> x + y); + System.out.println("Lines: " + lineCount); + System.out.println("Duration: " + (end - now)); + System.out.println("Lines/second: " + lineCount / ((end - now) / 1000)); + } + } +} +``` + +Running this using `java Bench.java` in the Mockito repo root, eventually we see it +settle on approximately the following numbers:
+ +```bash +359ms +378ms +353ms +``` + +The codebase hasn't changed - we are still compiling 41,601 lines of code - +but now it only takes ~359ms. That tells us that using a long-lived warm Java compiler +we can compile approximately *116,000* lines of Java a second on a single core. + + +Compiling 116,000 lines of Java per second is very fast. That means we should expect +a million-line Java codebase to compile in about 9 seconds, _on a single thread_. That +may seem surprisingly fast, and you may be forgiven if you find it hard to believe. As +mentioned earlier, this number is expected to vary based on the codebase being compiled; +could it be that Mockito-Core just happens to be a very simple Java module that compiles +quickly? + +## Double-checking Our Results + +To double-check our results, we can pick another codebase to run some ad-hoc benchmarks. +For this I will use the Netty codebase: + +- https://github.com/netty/netty + +Netty is a large-ish Java project: ~500,000 lines of code. Again, to pick a somewhat +easily-reproducible benchmark, we want a decently-sized module that's relatively +standalone within the project: `netty-common` is a perfect fit. Again, we can use `find | grep | xargs` +to see how many lines of code we are looking at: + +```bash +$ find common/src/main/java | grep \\.java | xargs wc -l +29712 total +``` + +Again, Maven doesn't make it easy to show the classpath used to call `javac` ourselves, +but the following stackoverflow answer gives us a hint in how to do so: + +- https://stackoverflow.com/a/16655088/871202[In Maven, how output the classpath being used?] 
+ +```bash +> ./mvnw clean; time ./mvnw -e -X -pl common -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install +``` + +If you grep the output for `-classpath`, we see: + +```bash +-classpath /Users/lihaoyi/Github/netty/common/target/classes:/Users/lihaoyi/.m2/repository/org/graalvm/nativeimage/svm/19.3.6/svm-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/sdk/graal-sdk/19.3.6/graal-sdk-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/nativeimage/objectfile/19.3.6/objectfile-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/nativeimage/pointsto/19.3.6/pointsto-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/truffle/truffle-nfi/19.3.6/truffle-nfi-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/truffle/truffle-api/19.3.6/truffle-api-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/graalvm/compiler/compiler/19.3.6/compiler-19.3.6.jar:/Users/lihaoyi/.m2/repository/org/jctools/jctools-core/4.0.5/jctools-core-4.0.5.jar:/Users/lihaoyi/.m2/repository/org/jetbrains/annotations-java5/23.0.0/annotations-java5-23.0.0.jar:/Users/lihaoyi/.m2/repository/org/slf4j/slf4j-api/1.7.30/slf4j-api-1.7.30.jar:/Users/lihaoyi/.m2/repository/commons-logging/commons-logging/1.2/commons-logging-1.2.jar:/Users/lihaoyi/.m2/repository/org/apache/logging/log4j/log4j-1.2-api/2.17.2/log4j-1.2-api-2.17.2.jar:/Users/lihaoyi/.m2/repository/org/apache/logging/log4j/log4j-api/2.17.2/log4j-api-2.17.2.jar:/Users/lihaoyi/.m2/repository/io/projectreactor/tools/blockhound/1.0.6.RELEASE/blockhound-1.0.6.RELEASE.jar +``` + +Again, we can `export MY_CLASSPATH` and start using `javac` from the command line: + +```bash +> javac -cp $MY_CLASSPATH common/src/main/java/**/*.java +1.624s +1.757s +1.606s +``` + +Or programmatically using the `Bench.java` program we saw earlier: + +```bash +294ms +282ms +285ms +``` + +Taking 285ms for a hot-in-memory compile of 29,712 lines of code, `netty-common` +therefore compiles at *~104,000 lines/second*. 
+ +Although the choice of project is arbitrary, Mockito-Core and Netty-Common are decent +examples of Java code found "out in the wild". They aren't synthetic fake codebases generated +for the purpose of benchmarks, nor are they particularly unusual or idiosyncratic. They follow +most Java best practices and adhere to many of the most common Java linters (although those +were disabled for this performance benchmark). This is Java code that looks just like +any Java code you may write in your own projects, and it effortlessly compiles at +>100,000 lines/second. + +## What About Build Tools? + +Although the Java Compiler is blazing fast - compiling code at >100k lines/second and +completing both Mockito-Core and Netty-Common in ~300ms - the experience of using typical Java +build tools is nowhere near as snappy. Consider the benchmark of clean-compiling the +Mockito-Core codebase using Gradle or Mill: + +```bash +$ ./gradlew clean; time ./gradlew :classes --no-build-cache +4.14s +4.41s +4.41s + +$ ./mill clean; time ./mill compile +1.20s +1.12s +1.30s +``` + +Or the benchmark of clean-compiling the Netty-Common codebase using Maven or Mill: + +```bash +$ ./mvnw clean; time ./mvnw -pl common -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true -Dmaven.test.skip=true install +4.85s +4.96s +4.89s + +$ ./mill clean common; time ./mill common.compile +1.10s +1.12s +1.11s +``` + +These benchmarks are run in similar conditions as those we saw earlier: ad-hoc on my M1 +Macbook Pro, with the metadata and jars of all third-party dependencies already downloaded and +cached locally. So the time we are seeing above is purely the Java compilation + the overhead +of the build tool realizing it doesn't need to do anything except compile the Java source code +using the dependencies we already have on disk.
+ +Tabulating this all together gives us the table we saw at the start of this page: + +|=== +| Mockito Core | Time | Compiler lines/s | Slowdown | Netty Common | Time | Compiler lines/s | Slowdown +| Javac Hot | 0.36s | 115,600 | 1.0x | Javac Hot | 0.29s | 102,500 | 1.0x +| Javac Cold | 1.29s | 32,200 | 4.4x | Javac Cold | 1.62s | 18,300 | 5.6x +| Mill | 1.20s | 34,700 | 4.1x | Mill | 1.11s | 26,800 | 3.8x +| Gradle | 4.41s | 9,400 | 15.2x | Maven | 4.89s | 6,100 | 16.9x +|=== + +We explore the comparison between xref:mill:ROOT:comparisons/gradle.adoc[Gradle vs Mill] +or xref:mill:ROOT:comparisons/maven.adoc[Maven vs Mill] in more detail on their own dedicated pages. +For this article, the important thing is not comparing the build tools against each other, +but comparing the build tools against how fast they _could_ be if they just used +the `javac` Java compiler directly. And it's clear that compared to the actual work +done by `javac` to actually compile your code, build tools add a frankly absurd amount +of overhead ranging from ~4x for Mill to 15-16x for Maven and Gradle! + +## Whole Project Compile Speed + +One thing worth calling out is that the overhead of the various build tools does not +appear to go down in larger builds. This *Clean Compile Single-Module* benchmark +we explored above only deals with compiling a single small module.
But a similar *Sequential +Clean Compile* benchmarks which compiles the entire Mockito and Netty projects on +a single core shows similar numbers for the various build tools: + +* xref:mill:ROOT:comparisons/gradle.adoc#_sequential_clean_compile_all[Gradle compiling 100,000 lines of Java at ~5,600 lines/s] +* xref:mill:ROOT:comparisons/maven.adoc#_sequential_clean_compile_all[Maven compiling 500,000 lines of Java at ~5,100 lines/s] +* Mill compiling at ~25,000 lines/s on both the above whole-project benchmarks + +All of these are far below the 100,000 lines/s that we should expect from Java compilation, +but they roughly line up with the numbers measured above. Again, these benchmarks are ad-hoc, +on arbitrary hardware and JVM versions. They do include small amounts of other work, such +as compiling C/C++ code in Netty or doing ad-hoc file operations in Mockito. However, +most of the time is still spent in compilation, and this reinforces the early finding +that build tools (especially older ones like Maven or Gradle) are indeed adding huge +amounts of overhead on top of the extremely-fast Java compiler. + +## Conclusion + +From this study we can see the paradox: the Java _compiler_ is blazing fast, +while Java _build tools_ are dreadfully slow. Something that _should_ compile in a fraction +of a second using a warm `javac` takes several seconds (15-16x longer) to +compile using Maven or Gradle. Mill does better, but even it adds 4x overhead and falls +short of the snappiness you would expect from a compiler that takes ~0.3s to compile the +30-40kLOC Java codebases we experimented with. + +These benchmarks were run ad-hoc and on my laptop on arbitrary codebases, and the details +will obviously differ depending on environment and the code in question. Running it on an +entire codebase, rather than a single module, will give different results. Nevertheless, the +results are clear: "typical" Java code _should_ compile at ~100,000 lines/second on a single +thread. 
Anything less is purely build-tool overhead from Maven, Gradle, or Mill. + +Build tools do a lot more than the Java compiler. They do dependency management, parallelism, +caching and invalidation, and all sorts of other auxiliary tasks. But in the common case where +someone edits code and then compiles it, and all your dependencies are already downloaded and +cached locally, any time spent doing other things and not _actually +compiling Java_ is pure overhead. Checking for cache invalidation _shouldn't_ take 15-16x +as long as actually compiling your code. I mean it obviously does _today_, but it _shouldn't_! + +The Mill build tool goes to great lengths to try and minimize overhead, and already gets +xref:mill:ROOT:comparisons/why-mill.adoc#_performance[~4x faster builds] than Maven or Gradle on +real-world projects like Mockito or Netty. But there still is a long way to go to give Java +developers the fast, snappy experience that the underlying Java platform can provide. If +Java build and compile times are things you find important, you should try out Mill on +your own projects and get involved in the effort! \ No newline at end of file diff --git a/blog/modules/ROOT/pages/2-monorepo-build-tool.adoc b/blog/modules/ROOT/pages/2-monorepo-build-tool.adoc new file mode 100644 index 00000000000..f21a1f01b09 --- /dev/null +++ b/blog/modules/ROOT/pages/2-monorepo-build-tool.adoc @@ -0,0 +1,283 @@ +// tag::header[] + +# Why Use a Monorepo Build Tool? + + +:author: Li Haoyi +:revdate: 17 December 2024 +_{author}, {revdate}_ + +include::mill:ROOT:partial$gtag-config.adoc[] + + +Software build tools mostly fall into two categories: + +1. Single-language build tools, e.g. + https://maven.apache.org/[Maven] (Java), https://python-poetry.org/[Poetry] (Python), + https://doc.rust-lang.org/cargo/[Cargo] (Rust) + +2. Monorepo build tools targeting large codebases, e.g.
https://bazel.build/[Bazel], + https://www.pantsbuild.org/[Pants], https://buck.build/[Buck], and https://mill-build.org/[Mill] + +One question that comes up constantly is why do people use Monorepo build tools? Tools +like Bazel are orders of magnitude more complicated and hard to use than tools +like Poetry or Cargo, so why do people use them at all? + +// end::header[] + + + +It turns out that Monorepo build tools like Bazel or Mill do a lot of non-obvious things that +other build tools don't, that become important in larger codebases (100-10,000 active developers). +These features are generally irrelevant for smaller projects, which explains why most people +do not miss them. But past a certain size of codebase and engineering organization these +features become critical. We'll explore some of the core features of "Monorepo Build Tools" +below, from the perspective of Bazel (which I am familiar with) and Mill (which this +technical blog is about). + + +## Support for Multiple Languages + +While small software projects usually start in one programming language, larger ones +inevitably grow more heterogeneous over time. For example, you may be building a Go binary +and Rust library which are both used in a Python executable, which is then tested using a +Bash script and deployed as part of a Java backend server. The Java backend server may also +serve a front-end web interface compiled from Typescript, and the whole deployment again +tested using Selenium in Python or Playwright in Javascript. + +The reality of working in any large codebase and organization is that such rube-goldberg +code paths _do_ happen on a regular basis, and any monorepo build tool has to accommodate them. +If the build tool does not accommodate multiple languages, then what ends up happening is you +end up having lots of small build tools wired together.
Taking the example above, +you may have: + +- A simple Maven build for your backend server, +- A simple Webpack build for the Web frontend +- A simple Poetry build for your Python executable +- A simple Cargo build for your Rust library +- A simple Go build for your Go binary + +Although each tool does its job, none of those tools are sufficient to build/test/deploy +your project! Instead, you end up having a `bin/` or `build/` folder full of `.sh` scripts +that wire up these simpler per-language build tools in ad-hoc ways. And while the individual +language-specific build tools may be clean and simple, the rats nest of shell scripts that +you also need usually ends up being a mess that is impossible to work with. + +That is why monorepo build tools like Bazel and Mill try to be _language agnostic_. +Although they may come with some built in functionality (e.g. Bazel comes with C/C++/Java +support built in, Mill comes with Java/Scala/Kotlin), monorepo build tools must make +extending them to support additional languages _easy_. Bazel via its ecosystem +of `rules_*` libraries, Mill via its extensibility APIs which make it easy to +implement your own support for additional languages like +xref:mill:ROOT:extending/example-python-support.adoc[Python] or +xref:mill:ROOT:extending/example-typescript-support.adoc[Typescript]. That means that when +the built-in language support runs out - which is inevitable in large growing monorepos - +the user can smoothly extend the build tool to keep going rather than falling back to +ad-hoc shell scripts. + +## Support for Custom Build Tasks + +As projects get large, they also get more unique.
Every hello-world Java or Python or +Javascript project looks about the same, but larger projects start having unusual +requirements that no-one else does, for example: + +- Invoking a bespoke code-generator to integrate with your company's internal RPC system + + +- Generating custom deployment artifact formats to support that one legacy datacenter you + need to get your code running in + +- Downloading third-party dependency sources, patching them, and building them from source + to work around issues that you have fixed but not yet succeeded in upstreaming + +- Compiling the compiler you need to compile the rest of your codebase, again perhaps + to make use of compiler bugfixes that you have not yet managed to get into an upstream release. + +The happy paths in build tools are usually great, and the slightly-off-the-beaten-path +workflows usually have third-party plugins supporting them: things like linting, generating +docker containers, and so on. But any growing software organization quickly finds itself +with build-time use cases that nobody else in the world has. At that point the paved paths +have ended and the build engineers will need to implement the custom build tasks themselves. + +Every build tool allows some degree of customization, but how easy and flexible they are +differs from tool to tool. e.g. a build tool like Maven requires its plugins to fit into +a very restricted Build Lifecycle (https://maven.apache.org/guides/introduction/introduction-to-the-lifecycle.html[link]): +this is good when compiling Java source code is all you need to do, but can be problematic when +you need to do something more far afield. The alternative is the aforementioned rats-nest +of shell scripts - either wrapping or wrapped by the traditional build tools - that implement +the custom build tasks you require. + +That is why monorepo build tools like Bazel and Mill make it easy to write custom tasks.
In +Bazel a custom task is just a https://bazel.build/reference/be/general#genrule[genrule()], in Mill +just `def foo = Task { ... }` with a block of code doing what you need, +and you can even xref:mill:ROOT:extending/import-ivy-plugins.adoc[use any third-party JVM library] +you are already familiar with as part of your custom task. This helps ensure your custom +tasks are written in concise type-checked code with automatic caching and parallelism, +which are all things that are lacking if you start implementing your logic outside of +your build tool in ad-hoc scripts. + +## Automatically Caching and Parallelizing Everything + +In most build tools, caching is opt in, so the core build/compile tasks usually end up getting +cached but everything else is not and ends up being wastefully recomputed all the time. In +monorepo build tools like Bazel or Mill, everything is cached and everything is parallelized. +Even tests are cached so if you run a test twice on the same code and inputs (transitively), +the second time it is skipped. + +The importance of caching and parallelism grows together with the codebase: + +- For smaller codebases, you do not need to cache or parallelize at all: compilation and + testing are fast enough that you can just run them every time from a clean slate + without inconvenience + +- For medium-sized codebases, caching and parallelizing the slowest tasks (e.g. compilation + or testing) is usually enough. Most build tools have some support for manually opting-in to + some kind of caching or parallelization framework, and although you will likely miss out + on many "ad-hoc" build tasks that still run un-cached and sequentially, those are few + enough not to matter + +- For large codebases, you want everything to be cached and parallelized. Caching just the + "core" build tasks is no longer enough, and any non-cached or non-parallel build tasks + results in noticeable slowness and inconvenience. 
+
+Take ad-hoc source code generation as an example: a small codebase may not have any. A
+medium-sized codebase may have some but little enough that it doesn't matter if it runs
+sequentially un-cached. But a large codebase may have multiple RPC IDL
+code generators (e.g. https://protobuf.dev/[protobuf], https://thrift.apache.org/[thrift]),
+static resource pre-processors, and other custom tasks, such that not caching and parallelizing
+these causes visible slowdowns and inconvenience.
+
+In monorepo build tools like Mill or Bazel, caching and parallelism are automatic and
+enabled by default. That means that it doesn't matter what you are running - whether
+it's the core compilation workflows or some ad-hoc custom tasks - you always get the
+benefits of caching and parallelization to keep your build system fast and responsive.
+
+## Seamless Remote Caching
+
+"Remote caching" means I can compile something on my laptop, and you can download it to your laptop
+for usage. "Seamless" means I don't need to do anything to get this behavior - no manual
+commands to upload and download - so the distribution of build outputs from my laptop to
+yours happens completely automatically.
+
+This also applies to tests: e.g. if TestFoo was run in CI on master, if I pull
+master and run all tests without changing the Foo code, TestFoo is skipped and uses the
+CI result.
+
+Bazel, Pants, and many other monorepo build tools support this out of the box, with
+open source back-end servers such as https://github.com/buchgr/bazel-remote[Bazel Remote].
+The clients and servers all conform to a https://github.com/bazelbuild/remote-apis[standardized
+protocol], so you can easily drop in a new server or new build client and have it work
+with all your existing infrastructure.
Mill does not yet support remote caching, but there
+are some https://github.com/com-lihaoyi/mill/pull/2777[prototypes] and
+https://github.com/com-lihaoyi/mill/pull/4065[work in progress] that will hopefully
+add support in the not-too-distant future.
+
+## Remote Execution
+
+"Remote execution" means that I can run "compile" on my laptop and have it automatically
+happen in the cloud on 96 core machines, or I run a lot of tests (e.g. after a big refactor)
+on my laptop and it seamlessly gets farmed out to run 1024x parallel on a large
+compute cluster.
+
+Remote execution is valuable for two reasons:
+
+1. *Better Parallelism*:
+ The largest cloud machines you can get are typically around 96 cores, whereas if you farm
+ out the execution to a cluster you can easily run on 1024 or more cores in parallel
+
+2. *Better Utilization*: e.g. If you
+ give every individual a 96 core devbox, most of the time when they are not actively running
+ anything (e.g. they are thinking, typing, talking to someone, etc.) those 96 cores are
+ completely idle. It's not unusual for utilization on devboxes to be <1% while you are still
+ paying for the other 99% of idle CPU time. In contrast, an auto-scaling remote execution
+ cluster can spin down machines that are not in use, and achieve >50% utilization rates
+
+One surprising thing is that remote execution can be both faster _and_ cheaper than running
+things locally on a laptop or devbox! Running 256 cores for 1 minute doesn't cause any more
+cloud spending than running 16 cores for 16 minutes, even though the former finishes 16x
+faster! And due to the improved utilization from remote execution clusters, the total savings
+can be significant.
+
+Monorepo build tools like Bazel, Pants, and Buck all support remote execution out of the box.
+Mill does not support it, which means it might not be suitable for the largest monorepos
+with >10,000 active developers.
+ +## Dependency based test selection + +When using Bazel to build a large project, you can use bazel query to determine the possible +targets and tests affected by a code change, allowing you to easily set up pull-request validation +to only run tests downstream of a PR diff and skip unrelated ones. The Mill build tool also supports +this, as xref:mill:ROOT:large/selective-execution.adoc[Selective Execution], letting you snapshot +your code before and after a code change and only run tasks that are downstream of those changes. + +Fundamentally, running "all tests" in CI is wasteful when you know from the build tool +that only some tests are relevant to the code change being tested. If every pull request always +runs every single test in a monorepo, then it's natural for PR validation times to grow unbounded +as the monorepo grows. Sooner or later this will start causing issues. + + +Any large codebase that doesn't use a monorepo build tool ends up re-inventing this manually, e.g. +consider this code in apache/spark that re-implements this in a Python script that wraps +`mvn` or `sbt` (https://github.com/apache/spark/blob/290b4b31bae2e02b648d2c5ef61183f337b18f8f/dev/sparktestsupport/modules.py#L108-L126[link]). +With a proper monorepo build tool, such functionality comes for free out-of-the-box with better +precision and correctness than anything you could hack together manually. + +## Build Task Sandboxing + +There are two kinds of sandboxing that monorepo build tools like Bazel do: + +1. *Semantic sandboxing*: this ensures your build tasks do not make use of un-declared files, + or write to places on disk that can affect other tasks. In most build tools, this + kind of mistake results in confusing nondeterministic parallelism and cache invalidation + problems down the road, where e.g. your build step may rely on a file on disk but not realize + it needs to re-compute when the file changes. 
In Bazel, these mis-configurations result in a
+ deterministic error up front, enforced via a https://bazel.build/docs/sandboxing[variety of mechanisms]
+ (e.g. https://en.wikipedia.org/wiki/Cgroups[CGroups] on Linux,
+ https://www.chromium.org/developers/design-documents/sandbox/osx-sandboxing-design/[Seatbelt Sandboxes] on Mac-OSX).
+
+2. *Resource sandboxing*: Bazel also has the ability to limit CPU/Memory usage
+ (https://github.com/bazelbuild/bazel/pull/21322), which eliminates the noisy neighbour
+ problem and ensures a build step or test gets the same compute footprint whether run alone
+ during development or 96x parallel on a CI worker.
+ Otherwise it's common for tests to pass when run alone during manual development, then timeout
+ or OOM when run in CI under resource pressure from other tests hogging the CPU or RAM
+
+Both kinds of sandboxing have the same goal: to make sure your build tasks behave the same
+way no matter how they are run, sequentially or in parallel with one another. Even Bazel's
+sandboxes aren't 100% hermetic, but are hermetic enough.
+
+xref:mill:ROOT:depth/sandboxing.adoc[The Mill build tool's sandboxing] is less powerful
+than Bazel's CGroup/Seatbelt sandboxes, and simply runs tasks and subprocesses in
+sandbox directories to try and limit cross-task interference. But it has the same goal
+of adding best-effort guardrails to mitigate race conditions and non-determinism.
+
+## Who Needs Monorepo Build Tools?
+
+Most small projects never need the features listed above: small projects build quickly
+without any optimizations, use a single language toolchain without customization, and
+any bugs related to non-determinism or resource footprint can usually be investigated
+and dealt with manually. Any missing build-tool features can be papered over with shell
+scripts.
+
+That is how every small project starts, and as most small projects never grow big you
+can go quite a distance without needing anything more.
While the features above would be
+nice to have, they are _wants_ rather than _needs_.
+
+But once in a while, a project _does_ grow large. Sometimes the rocket-ship really _does_
+take off! In such cases, as the number of developers grows from 1 to 10 to 1,000,
+you will inevitably start feeling pain:
+
+1. Local build times slowing to a crawl on your laptop, using 1 out of 16 available CPUs
+2. Pull-request validation taking 4 hours to run mostly-unnecessary tests with a 50% flake rate
+3. An unmaintainable multi-layer jungle of shell, Python, and Make scripts layered on
+ top of your classic build tools like Maven/Poetry/Cargo, that everyone knows should be
+ cleaned up but nobody knows how.
+
+Monorepo build tools bring performance optimizations to
+bring down CI times, sandboxing improvements to reduce flakiness, and a structured way
+of replacing the ubiquitous folder-full-of-bash-scripts. It is these features that really
+let a codebase _scale_, allowing you to grow your developer team from 100 to 1,000 developers
+and beyond without everything grinding to a halt. That is why people use "monorepo build tools"
+like Mill (most suitable for projects with 10-1,000 active developers) or Bazel
+(most suitable for larger projects with 100-10,000 active developers).
+
diff --git a/blog/modules/ROOT/pages/3-selective-testing.adoc b/blog/modules/ROOT/pages/3-selective-testing.adoc
new file mode 100644
index 00000000000..ca2499eb0a0
--- /dev/null
+++ b/blog/modules/ROOT/pages/3-selective-testing.adoc
@@ -0,0 +1,549 @@
+// tag::header[]
+
+# Faster CI with Selective Testing
+
+
+:author: Li Haoyi
+:revdate: 24 December 2024
+_{author}, {revdate}_
+
+include::mill:ROOT:partial$gtag-config.adoc[]
+
+Selective testing is a key technique necessary for working with any large codebase
+or monorepo: picking which tests to run to validate a change or pull-request, because
+running every test every time is costly and slow.
This blog post will explore what +selective testing is all about, the different approaches you can take with selective +testing, based on my experience working on developer tooling and CI for the last decade at +Dropbox and Databricks. Lastly, we will discuss +xref:mill::large/selective-execution.adoc[how the Mill build tool supports selective testing]. + +// end::header[] + +## Large Codebases + +Although codebases can be large, in any large codebase you are typically only working on +a fraction of it at any point in time. As an example that we will use throughout this +article, consider a large codebase or monorepo that contains the code for: + +1. 3 backend services: `backend_service_1`, `backend_service_2`, `backend_service_3` +2. 2 frontend web codebases: `frontend_web_1`, `frontend_web_2` +3. 1 `backend_service_utils` module, and 1 `frontend_web_utils` module +4. 3 deployments: `deployment_1`, `deployment_2`, `deployment_3`, that make use of the + backend services and frontend web codebases. +5. 
The three deployments may be combined into a `staging_environment`, which is then + used in three sets of ``end_to_end_test``s, one for each deployment + +These modules may depend on each other as shown in the diagram below, +with the various `backend_service` and `frontend_web` codebases +grouped into `deployments`, and the `_utils` files shared between them: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + + frontend_web_utils -> frontend_web_2 -> deployment_2 + frontend_web_utils -> frontend_web_1 -> deployment_1 + + backend_service_utils -> backend_service_3 -> deployment_3 + backend_service_utils -> backend_service_2 -> deployment_2 + backend_service_utils -> backend_service_1 -> deployment_1 + deployment_1 -> staging_environment + deployment_2 -> staging_environment + deployment_3 -> staging_environment + staging_environment -> end_to_end_test_1 + staging_environment -> end_to_end_test_2 + staging_environment -> end_to_end_test_3 +} +``` + +These various modules would typically each have their own test suite, which +for simplicity I left out of the diagrams. + +Although the codebase described above is just an example, it reflects the kind of +codebase structure that exists in many real-world codebases. Using this example, +let us now consider a few ways in which selective testing can be used. + +## No Selective Testing + +Most software projects start off naively running every test on every pull-request +to validate the changes. For small projects this is fine. Every project starts +small and most projects stay small, so this approach is not a problem at all. 
+ +However, for the projects that do continue growing, this strategy quickly +becomes infeasible: + +* The size of the codebase (in number of modules, lines of code, or number of tests) grows + linearly `O(n)` over time +* The number of tests necessary to validate any pull-request also grows linearly `O(n)` + +This means that although the test runs may start off running quickly, they naturally slow +down over time: + +* A week-old project may have a test suite that runs in seconds +* By a few months, the test suite may start taking minutes to run +* After a few years, the tests may be taking over an hour. +* And there is no upper bound: in a large growing project test runs can easily + take several hours or even days to run + +Although at a small scale waiting seconds or minutes for tests to run is not a problem, +the fact that it grows unbounded means it will _eventually_ become a problem on any growing +codebase. Large codebases with test suites taking 4-8 hours to run are not uncommon at all, and +this can become a real bottleneck in how fast developers can implement features or otherwise +improve the functionality of the codebase. + +In general, "no selective testing" works fine for small projects with 1-10 developers, but +beyond that the inefficiency of running tests not relevant to your changes starts noticeably +slowing down day to day development. At that point, the first thing people reach for is +some kind of folder-based selective testing. + + +## Folder-based Selective Testing + +Typically, in any large codebase, most work happens within a single part of it: e.g. a +developer may be working on `backend_service_1`, another may be working on `frontend_web_2`. +The obvious thing to do would be make sure each module is in its own folder, e.g. + +``` +my_repo/ + backend_service_1/ + backend_service_2/ + backend_service_3/ + frontend_web_1/ + frontend_web_2/ + backend_service_utils/ + frontend_web_utils/ + deployment_1/ + deployment_2/ + deployment_3/ + ... 
+``` + +To do simple module-based selective execution generally involves: + +1. Run a `git diff` on the code change you want to validate +2. For any folders which contain changed files, run their tests + +For example, + +- A PR changing `backend_service_1/` will need to run the `backend_service_1/` tests +- A PR changing `frontend_web_2/` will need to run the `frontend_web_2/` tests + +This does limit the number of tests any individual needs to execute: someone working on +`backend_service_1` does not need to run the tests for `backend_service_2`, `backend_service_3`, +etc.. This helps keep the test times when working on the monorepo from growing unbounded. + +However, folder-based selective testing is not enough: it is possible that changing +a module would not break its own tests, but it could cause problems with downstream modules that +depend on it: + +1. Changing `backend_service_1` may require corresponding changes to `frontend_web_1`, and + if those changes are not coordinated the integration tests in `deployment_1` would fail + +2. Changing `frontend_web_utils` could potentially break both `frontend_web_1` and `frontend_web_2` + that depend on it, and thus we need to run the tests for both those modules to validate our change. + +In these cases, the failure mode of folder-based selective testing is: + +* You change code in a folder +* _That folder's_ tests may pass +* You merge your change into the main repository +* Only after merging, you notice _other folders' tests_ failing, which you did not + notice up front because you didn't run their tests before merging. 
But because you + merged the breaking change, you have inconvenienced other people working in other + parts of the codebase +* You now have to tediously revert your breaking change, or rush + a fix-forward to un-break the folders whose tests you broke, and unblock the developers + working in those folders + +Folder-based selective testing works fine for codebases with 10-100 developers: there are +occasional cases where a breakage might slip through, but generally it's infrequent enough +that it's tolerable. But as the development organization grows beyond 100, these breakages +affect more and more people and become more and more painful. To resolve this, we need +something more sophisticated. + +## Dependency-based Selective Testing + +To solve the problem of code changes potentially breaking downstream modules, we need to make +sure that for every code change, we run both the tests for that module as well as every downstream +test. For example, if we make a change to `backend_service_1`, we need to run the unit tests for +`backend_service_1` as well as the integration tests for `deployment_1`: + + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + + frontend_web_utils -> frontend_web_2 -> deployment_2 + frontend_web_utils -> frontend_web_1 -> deployment_1 + + backend_service_utils -> backend_service_3 -> deployment_3 + backend_service_utils -> backend_service_2 -> deployment_2 + + backend_service_utils -> backend_service_1 + backend_service_1 [color=red, penwidth=2] + deployment_1 [color=red, penwidth=2] + backend_service_1 -> deployment_1 [color=red, penwidth=2] +} +``` + +On the other hand, if we make a change to `frontend_web_utils`, we need to run the unit tests +for `frontend_web_1` and `frontend_web_2`, as well as the integration tests for `deployment_1` +and `deployment_2`, but _not_ `deployment_3` since (in this example) it doesn't depend on any frontend codebase: + +```graphviz +digraph G { + 
rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + frontend_web_utils [color=red, penwidth=2] + frontend_web_2 [color=red, penwidth=2] + deployment_2 [color=red, penwidth=2] + frontend_web_1 [color=red, penwidth=2] + deployment_1 [color=red, penwidth=2] + + frontend_web_utils -> frontend_web_1 -> deployment_1 [color=red, penwidth=2] + frontend_web_utils -> frontend_web_2 -> deployment_2 [color=red, penwidth=2] + + backend_service_utils -> backend_service_3 -> deployment_3 + backend_service_utils -> backend_service_2 -> deployment_2 + backend_service_utils -> backend_service_1 -> deployment_1 +} +``` + +This kind of dependency-based selective test execution is generally straightforward: + +1. You need to know which modules own which source files (e.g. based on the folder), +2. You need to know which modules depend on which other modules +3. Run a `git diff` on the code change you want to validate +4. For any modules which contain changed files, run a breadth-first traversal of the module graph +5. For all the modules discovered during the traversal, run their tests + +The algorithm (i.e. a breadth first search) is pretty trivial, the interesting part +is generally how you know _"which modules own which source files"_ and +_"which modules depend on which other modules"_. + +* For smaller projects this can be + managed manually in a bash or python script, e.g. 
+ https://github.com/apache/spark/blob/290b4b31bae2e02b648d2c5ef61183f337b18f8f/dev/sparktestsupport/modules.py#L108-L126[this code in Apache Spark] + that manually maintains a list of source folders and dependencies per-module, + as well as what command in the underlying build tool you need to run + in order to test that module (`sbt_test_goals`): + +```python +tags = Module( + name="tags", + dependencies=[], + source_file_regexes=["common/tags/"], +) + +utils = Module( + name="utils", + dependencies=[tags], + source_file_regexes=["common/utils/"], + sbt_test_goals=["common-utils/test"], +) + +kvstore = Module( + name="kvstore", + dependencies=[tags], + source_file_regexes=["common/kvstore/"], + sbt_test_goals=["kvstore/test"], +) + +... +``` + +* In a larger project maintaining this information by hand is tedious and error prone, + so it is better to get the information from your build tool that already has it + (e.g. via xref:mill::large/selective-execution.adoc[Mill Selective Execution]). + +An alternate mechanism for achieving dependency-based selective testing is via caching +of test results, e.g. in tools like Bazel which support https://bazel.build/remote/caching[Remote Caching]. +In this approach, rather than using `git diff` and a graph traversal to decide what tests +to run, we simply run every test and rely on the fact that tests that are run without +any changes to their upstream dependencies will re-use a version from the cache automatically. +Although the implementation is different, this caching-based approach largely has the +same behavioral and performance characteristics as the ``git diff``-based approach +to dependency-based selective testing. + +### Limitations of Dependency-based Selective Testing + +Dependency-based selective test execution can get you pretty far: 100s to 1,000 developers +working on a shared codebase. 
But it still has weaknesses, and as the number of
+developers grows beyond 1,000, you begin noticing issues and inefficiency:
+
+1. *You are limited by the granularity of your module graph*. For example,
+ `backend_service_utils` may be used by all three ``backend_service``s,
+ but not _all_ of `backend_service_utils` is used by all three services. Thus
+ a change to `backend_service_utils` may result in running tests for all three
+ ``backend_service``s, even if that change may not affect that particular service
+
+2. *You may over-test things redundantly*. For example, a function in `backend_service_utils`
+ may be exhaustively tested in ``backend_service_utils``'s own test suite. If so, running
+ unit tests for all three ``backend_service``s
+ as well as integration tests for all three ``deployment``s may be unnecessary, as they
+ will just exercise code paths that are already exercised as part of the `backend_service_utils`
+ test suite
+
+
+
+These failure modes are especially problematic for integration or end-to-end tests.
+The nature of end-to-end tests is that they depend on _everything_, and so you find
+_any change_ in your codebase triggering _every end-to-end test_ to be run. These
+are also the slowest tests in your codebase, so running every end-to-end test every time
+you touch any line of code is extremely expensive and wasteful.
+ +For example, touching `backend_service_1` is enough to trigger all the `end_to_end` tests: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + + frontend_web_utils -> frontend_web_2 -> deployment_2 + frontend_web_utils -> frontend_web_1 -> deployment_1 + + backend_service_utils -> backend_service_1 + backend_service_1 [color=red, penwidth=2] + deployment_1 [color=red, penwidth=2] + backend_service_1 -> deployment_1 [color=red, penwidth=2] + backend_service_utils -> backend_service_2 -> deployment_2 + backend_service_utils -> backend_service_3 -> deployment_3 + + deployment_1 -> staging_environment + deployment_2 -> staging_environment + deployment_3 -> staging_environment + staging_environment [color=red, penwidth=2] + end_to_end_test_1 [color=red, penwidth=2] + end_to_end_test_2 [color=red, penwidth=2] + end_to_end_test_3 [color=red, penwidth=2] + + staging_environment -> end_to_end_test_1 [color=red, penwidth=2] + staging_environment -> end_to_end_test_2 [color=red, penwidth=2] + staging_environment -> end_to_end_test_3 [color=red, penwidth=2] +} +``` + +Touching `frontend_web_2` is also enough to trigger all the `end_to_end` tests: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + + + + frontend_web_2 [color=red, penwidth=2] + frontend_web_2 -> deployment_2 [color=red, penwidth=2] + frontend_web_utils -> frontend_web_1 -> deployment_1 + frontend_web_utils -> frontend_web_2 + deployment_2 [color=red, penwidth=2] + backend_service_utils -> backend_service_1 + backend_service_1 -> deployment_1 + backend_service_utils -> backend_service_2 -> deployment_2 + backend_service_utils -> backend_service_3 -> deployment_3 + + deployment_1 -> staging_environment + deployment_2 -> staging_environment + deployment_3 -> staging_environment + staging_environment [color=red, penwidth=2] + end_to_end_test_1 [color=red, penwidth=2] + end_to_end_test_2 [color=red, 
penwidth=2]
+ end_to_end_test_3 [color=red, penwidth=2]
+
+ staging_environment -> end_to_end_test_1 [color=red, penwidth=2]
+ staging_environment -> end_to_end_test_2 [color=red, penwidth=2]
+ staging_environment -> end_to_end_test_3 [color=red, penwidth=2]
+}
+```
+
+The two examples above demonstrate both failure modes:
+
+1. `staging_environment` is very coarse grained, causing all ``end_to_end_test``s to be run
+ even if they don't actually test the code in question
+
+2. Every `end_to_end_test` likely exercises the same setup/teardown/plumbing code,
+ in addition to the core logic under test, resulting in the same code being exercised
+ redundantly by many different tests
+
+This results in the selective testing system wasting both time and compute resources,
+running tests that aren't relevant or repeatedly testing the same code paths over and over.
+While there are some ways you can improve the granularity of the module graph to mitigate
+these two issues, these issues are fundamental to dependency-based selective testing:
+
+* Dependency-based Selective Testing means _a problematic code change causes all affected tests to break_
+* For an effective CI system, all we need is that _every problematic code change breaks at least one test_
+
+Fundamentally, CI just needs to ensure that every problematic code change breaks _at least one thing_,
+because that is usually enough for the pull-request author to act on it and resolve the problem.
+Running more tests to display more breakages is usually a waste of time and resources. Thus,
+although dependency-based selective testing helps, it still falls short of the ideal of how a CI
+system should behave.
+
+## Heuristic-based Selective Testing
+
+The next stage of selective testing that most teams encounter is using heuristics: these are
+ad-hoc rules that you put in place to decide what tests to run based on a code change.
+Common heuristics include:
+
+### Limiting Dependency Depth
+
+The chances
+are that a breakage in module X will be caught by X's test suite, or the test suite of
+X's direct downstream modules, so we don't need to run every single transitive downstream
+module's test suite. e.g. if we set `N = 1`, then a change to `backend_service_1` shown below
+will only run tests for `backend_service_1` and `deployment_1`, but not the ``end_to_end_test``s
+downstream of the `staging_environment`:
+
+```graphviz
+digraph G {
+ rankdir=LR
+ node [shape=box width=0 height=0 style=filled fillcolor=white]
+
+ frontend_web_utils -> frontend_web_2 -> deployment_2
+ frontend_web_utils -> frontend_web_1 -> deployment_1
+
+ backend_service_utils -> backend_service_1
+ backend_service_1 [color=red, penwidth=2]
+ deployment_1 [color=red, penwidth=2]
+ backend_service_1 -> deployment_1 [color=red, penwidth=2]
+ backend_service_utils -> backend_service_2 -> deployment_2
+ backend_service_utils -> backend_service_3 -> deployment_3
+
+ deployment_1 -> staging_environment
+ deployment_2 -> staging_environment
+ deployment_3 -> staging_environment
+ staging_environment -> end_to_end_test_1
+ staging_environment -> end_to_end_test_2
+ staging_environment -> end_to_end_test_3
+}
+```
+
+At my last job, we picked `N = 8` somewhat arbitrarily, but as a heuristic there is
+no "right" answer, and the exact choice can be chosen to tradeoff between thoroughness
+and test latency. The principle here is that "most" code is tested
+by its own tests and those of its direct dependencies, so running tests for downstream
+folders which are too far removed in the dependency graph is "generally" not useful.
+
+### Hard-coding Dependency Relationships
+
+For example if I change `deployment_1`, I can choose to ignore the `staging_environment`
+when finding downstream tests, and only run `end_to_end_test_1` since it is the end-to-end
+test for the `deployment_1` module.
We represent this by rendering `staging_environment` +in dashed lines below, and adding additional arrows representing the hard-coded dependency +from `deployment_1` to `end_to_end_test_1`: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + + frontend_web_utils -> frontend_web_2 -> deployment_2 + frontend_web_utils -> frontend_web_1 -> deployment_1 + + backend_service_utils -> backend_service_3 -> deployment_3 + backend_service_utils -> backend_service_2 -> deployment_2 + backend_service_utils -> backend_service_1 + + backend_service_1 [color=red, penwidth=2] + deployment_1 [color=red, penwidth=2] + backend_service_1 -> deployment_1 [color=red, penwidth=2] + + + deployment_3 -> staging_environment -> end_to_end_test_1 [style=dashed] + + deployment_2 -> staging_environment -> end_to_end_test_2 [style=dashed] + deployment_1 -> staging_environment -> end_to_end_test_3 [style=dashed] + staging_environment [style=dashed] + + + deployment_1 -> end_to_end_test_1 [color=red, penwidth=2] + end_to_end_test_1 [color=red, penwidth=2] + end_to_end_test_2 + deployment_3 -> end_to_end_test_3 +} +``` + +This approach does work, as developers usually have _some_ idea of what tests +should be run to exercise their application code. But maintaining these hard-coded +dependency relationships is difficult in a large and evolving codebase: + +* Generally only the most senior engineers with the most experience working in the + codebase are able to make such judgements + +* The nature of heuristics is that there is no right or wrong answer, and it is difficult + to determine whether a selection of hard-coded dependencies is good or not + +Fundamentally, tweaking magic configuration values to try and optimize an unclear +result is not a good use of developer time, but in some cases doing so may be necessary +in large codebases to keep pull-request validation time and cost under control. 
+
+### Machine-Learning-based Selective Testing
+
+The last option I've seen in the wild is machine-learning based test selection. This
+has two steps:
+
+1. Train a machine learning model using the last 10,000 commits on the codebase,
+   with the `git diff` of each commit and any tests that were broken by it.
+
+2. For any new pull-request, feed the `git diff` into the model trained in (1) above
+   to get a list of likely-affected tests, and only run those
+
+This approach basically automates the manual tweaking described in
+<>, and instead of some senior engineer trying to use
+their intuition to guess what tests to run, the ML model does the same thing.
+You can then easily tune the model to optimize for different tradeoffs between latency
+and thoroughness, depending on what is important for your development team at any point in time.
+
+One downside of machine-learning-based selective testing is the ML models are a black box,
+and you have very little idea of why they do what they do. However, all of the heuristics
+people use in <> are effectively black boxes anyway,
+since you'll be hard-pressed to come up with an answer for why someone
+<> decided to set `N = 8` rather than `7` or `9`, or why
+someone <> decided on the exact hard-coded config they
+ended up choosing.
+
+
+### Limitations of Heuristics
+
+The nature of heuristics is that they are _approximate_. That means that it is possible
+both that we run too many tests and waste time, and also that we run too few tests and allow
+a breakage to slip through. Typically this sort of heuristic is only used early in the testing
+pipeline, e.g.
+
+1. When validating pull-requests, use heuristics to trim down the set of tests to run before merging
+2. After merging a pull-request, run a more thorough set of tests without heuristics
+   to catch any bugs that slipped through and prevent bugs from being shipped to customers.
+3.
If a bug is noticed during post-merge testing, bisect it and revert/fix the offending commit
+
+This may seem hacky and complicated, and bisecting/reverting commits post-merge can indeed
+waste a lot of time. But such a workflow is necessary in any large codebase and organization.
+The heuristics also do not need to be 100% precise, and as long as they are precise _enough_ that
+the time saved skipping tests outweighs the time spent dealing with post-merge breakages, it
+still ends up being worth it.
+
+## Selective Testing in Mill
+
+The Mill build tool's xref:mill::large/selective-execution.adoc[Selective Test Execution]
+supports <> out of the box. This makes it easy to set up
+CI for your projects using Mill that only run tests that are downstream of the code
+you changed in a pull-request. Selective Test Execution in Mill is implemented at the _task
+level_, so even custom tasks and overrides can benefit from it. Mill's own pull-request
+validation jobs benefit greatly from selective testing, and you can see documentation-only
+pull-requests such as https://github.com/com-lihaoyi/mill/pull/4175[#4175] basically
+https://github.com/com-lihaoyi/mill/actions/runs/12482782465[skipping the entire test suite]
+since it did not touch any files that could affect those tests.
+
+However, although Mill provides better support for selective testing than most build tools
+(which provide none), the <> do cause issues.
+Even in Mill's own pull-request validation jobs, the fact that the most expensive integration
+or end-to-end tests are selected every time causes slowness. What kind of
+<> can help improve things remains an open question.
+
+If you are interested in build tools, especially where they apply to selective testing on
+large codebases and monorepos, you should definitely give the
+https://mill-build.org/[Mill Build Tool] a try!
Mill's support for selective testing can
+definitely help you keep pull-request validation times reasonable in a large codebase or
+monorepo, which is key to keeping developers productive and development velocity fast
+even as the size and scope of a project grows.
diff --git a/blog/modules/ROOT/pages/4-flaky-tests.adoc b/blog/modules/ROOT/pages/4-flaky-tests.adoc
new file mode 100644
index 00000000000..ef6a09a66de
--- /dev/null
+++ b/blog/modules/ROOT/pages/4-flaky-tests.adoc
@@ -0,0 +1,437 @@
+// tag::header[]
+
+# How To Manage Flaky Tests
+
+
+:author: Li Haoyi
+:revdate: 1 January 2025
+_{author}, {revdate}_
+
+include::mill:ROOT:partial$gtag-config.adoc[]
+
+Many projects suffer from the problem of flaky tests: tests that pass or fail
+non-deterministically. These cause confusion, slow development cycles, and endless
+arguments between individuals and teams in an organization.
+
+This article dives deep into working with flaky tests, from the perspective of someone
+who built the first flaky test management systems at Dropbox and Databricks and maintained
+the related build and CI workflows over the past decade. The issue of flaky tests can be
+surprisingly unintuitive, with many "obvious" approaches being ineffective
+or counterproductive. But it turns out there _are_ right and wrong answers to many of
+these issues, and we will discuss both so you can better understand what managing flaky tests
+is all about.
+
+// end::header[]
+
+## What Causes Flaky Tests?
+
+A flaky test is a test that sometimes passes and sometimes fails, non-deterministically.
+
+Flaky tests can be caused by a whole range of issues:
+
+### Race conditions within tests
+
+Often this manifests as `sleep`/`time.sleep`/`Thread.sleep` calls in your test
+expecting some concurrent code path to complete, which may or may not wait
+long enough depending on how much CPU contention there is slowing down your test code.
+But any multi-threaded or multi-process code has the potential for race conditions or +concurrency bugs, and these days most systems make use of multiple cores. + +### Race conditions between tests + +Two tests both reading and writing +to the same global variables or files (e.g. in `~/.cache`) causing non-deterministic +outcomes. These can be tricky, because every test may pass when run alone, and the +test that fails when run in parallel may not be the one that is misbehaving! + +### Test ordering dependencies + +Even in the absence of parallelism, tests may interfere +with one another by mutating global variables or files on disk. Depending on the exact +tests you run or the order in which you run them, the tests can behave differently +and pass or fail unpredictably. Perhaps not strictly non-deterministic - the same +tests run in the same order will behave the same - but practically non-deterministic +since different CI runs may run tests in different orders. +xref:3-selective-testing.adoc[Selective Testing] may cause this kind of issue, +or dynamic load-balancing of tests between parallel workers to minimize total wall +clock time (which https://github.com/dropbox/changes[Dropbox's Changes CI system] did) + +### Resource contention + +Depending on exactly how your tests are run, they may rely +on process memory, disk space, file descriptors, ports, or other limited +physical resources. These are subject to noisy neighbour problems, e.g. an +overloaded linux system with too many processes using memory will start OOM-Killing +processes at random. + +### External flakiness + +Integration or end-to-end tests often +interact with third-party services, and it is not uncommon for the service +to be flaky, rate limit you, have transient networking errors, or just be entirely +down for some period of time. 
Even fundamental workflows like "downloading
+packages from a package repository" can be subject to flaky failures
+
+Sometimes the flakiness is test-specific, sometimes it is actually flakiness in the
+code being tested, which may manifest as real flakiness when customers are trying
+to use your software. Both scenarios manifest the same to developers - a test passing
+and failing non-deterministically when run locally or in CI.
+
+## Why Are Flaky Tests Problematic?
+
+### Development Slowdown
+
+Flaky tests generally make it impossible to know the state of your test suite,
+which in turn makes it impossible to know whether the software you are working on
+is broken or not, which is the reason you wanted tests in the first place.
+Even a small number of flaky tests is enough to destroy the core value of your test suite.
+
+* Ideally, if the tests pass on a proposed code change, even someone
+  unfamiliar with the codebase can be confident that the code change did not break
+  anything.
+
+* Once test failures start happening spuriously, it quickly becomes
+  impossible to get a fully "green" test run without failures
+
+* So in order to
+  validate a code change (merging a pull-request, deploying a service, etc.) the developer then
+  needs to individually triage and make judgements on those test failures to determine
+  if they are real issues or spurious
+
+This means that you are back to the "manual" workflow of developers squinting at
+test failures to decide if they are relevant or not. This risks both false positives
+and false negatives:
+
+* A developer may waste time triaging a failure that turns out to be a flake, and
+  unrelated to the change being tested
+
+* A developer may wrongly judge that a test failure is spurious, only for it to be a
+  real failure that causes breakage for customers once released.
+
+Even relatively low rates of flakiness can cause issues.
For example, consider the following: + +* A codebase with 10,000 tests +* 1% of the tests are flaky +* Each flaky test case fails spuriously 1% of the time + +Although 1% of tests each failing 1% of the time may not seem like a huge deal, it means +that someone running the entire test suite only has a `0.99^100 = ~37%` chance of getting +a green test report! The other 63% of the time, someone running the test suite without any +real breakages gets one or more spurious failures, that they then have to spend time and energy +triaging and investigating. If the developer needs to retry the test runs to get a successful +result, they would need to retry on average `1 / 0.37 = 2.7` times: in this scenario +the retries alone may be enough to increase your testing latencies and infrastructure costs by +170%, on top of the manual work needed to triage and investigate the test failures! + + +### Inter-team Conflict + +One fundamental issue with flaky tests is organizational: + +* The team that owns a test benefits from the test running and providing coverage + for the code they care about + +* Other teams that run the test in CI suffer from the spurious failures and wasted time + that hitting flaky tests entails + +xref:3-selective-testing.adoc[Selective Testing] can help mitigate this to some extent by +letting you avoid running unrelated tests, but it doesn't make the problem fully disappear. +For example, a downstream service may be triggered every time an upstream utility library +is changed, and if the tests are flaky and the service and library are owned by different +teams, you end up with the conflict described above. + +What ends up happening is that _nobody_ prioritizes fixing their flaky tests, because +that is the selfishly-optimal thing to do, but as a result _everyone_ +suffers from _everyone else's_ flaky tests, even if everyone would be better if all flaky tests +were fixed. 
This is a classic https://en.wikipedia.org/wiki/Tragedy_of_the_commons[Tragedy of the Commons],
+and as long as flaky tests are allowed to exist, this will result in
+endless debates or arguments between teams about who needs to fix their flaky tests,
+wasting enormous amounts of time and energy.
+
+## Mitigating Flaky Tests
+
+In general it is impossible to completely avoid flaky tests, but you can take steps to
+mitigate them:
+
+1. Avoid race conditions in your application code to prevent random crashes or behavioral changes
+   affecting users, and avoid race conditions in your test code
+
+2. Run parallel test processes inside "sandbox" empty temp folders, to try and avoid
+   them reading and writing to the same files on the filesystem and risking race conditions.
+   (See xref:mill:ROOT:depth/sandboxing.adoc[Mill Sandboxing])
+
+3. Run test processes inside CGroups to mitigate resource contention: e.g. if every test process is limited
+   in how much memory it uses, it cannot cause memory pressure that might cause other tests
+   to be OOM-killed (See Bazel's https://github.com/bazelbuild/bazel/pull/21322[Extended CGroup Support],
+   which we implemented in https://www.databricks.com/blog/2021/10/14/developing-databricks-runbot-ci-solution.html[Databricks' Runbot CI system])
+
+4. Mock out external services: e.g. AWS can be mocked using https://www.localstack.cloud/[LocalStack], parts of
+   Kubernetes can be mocked using https://kind.sigs.k8s.io/[KIND], etc..
+
+5. xref:3-selective-testing.adoc[Selective Testing], e.g. via
+   xref:mill::large/selective-execution.adoc[Mill's Selective Test Execution], reduces the
+   number of tests you run
+   and thus the impact of flakiness.
+
+However, although you can mitigate the flakiness, you should not expect to make it go away
+entirely.
+
+* Race conditions _will_ find their way into your code despite your best efforts, and you _will_
+  need some hardcoded timeouts to prevent your test suite hanging forever.
+ +* There will always be _some_ limited physical resource you didn't realize could run out, + until it does. + +* Mocking out third-party services never ends up working 100%: inevitably + you hit cases where the mock isn't accurate enough, or trustworthy enough, and you still + need to test against the real service to get confidence in the correctness of your system. + +End-to-end tests and integration tests are especially prone to flakiness, as are UI +tests exercising web or mobile app UIs. + +As a developer, you should work hard in trying to make your application and test +code as deterministic as possible. You should have a properly-shaped +https://martinfowler.com/articles/practical-test-pyramid.html[Test Pyramid], with more small unit +tests that tend to be stable and fewer integration/end-to-end/UI tests that tend to be flaky. +But you should also accept that despite your best efforts, flaky tests _will_ appear, and so you +will need some plan or strategy to deal with them when they do. + +## How Not To Manage Flaky Tests + +Flaky test management can be surprisingly counter-intuitive. Below we discuss some common +mistakes people make when they first start dealing with flaky tests. + +### Do Not Block Code Changes on Flaky Tests + +The most important thing to take note of is that you should not block +code changes on flaky tests: merging pull-requests, deploying services, etc. + +That is despite blocking code changes being the default and most obvious behavior: e.g. +if you wait for a fully-green test run before merging a code change, and a flaky test +makes the test run red, then it blocks the merge. However, this is not a good workflow +for a variety of reasons: + +1. A flaky failure when testing a code change does not indicate the code change caused + that breakage. So blocking the merge on the flaky failure just prevents progress + without actually helping increase system quality. + +2. 
The flaky test may be in a part of the system totally unrelated to the code change + being tested, which means the individual working on the code change has zero context + on why it might be flaky, and unexpectedly context switching to deal with the flaky test + is mentally costly. + +3. Blocking progress on a flaky test introduces an incentives problem: The code/test owner + benefits from the flaky test's existence, but other people working in that codebase + get blocked with no benefit. This directly leads to the endless xref:_inter_team_conflict[] + mentioned earlier. + +Although _"all tests should pass before merging"_ is a common requirement, it is ultimately +unhelpful when you are dealing with flaky tests. + +### Preventing Flaky Tests From Being Introduced Is Hard + +It can be tempting to try and "https://en.wikipedia.org/wiki/Shift-left_testing[Shift Left]" +your flaky test management, to try and catch them before they end up landing in your codebase. +But doing so ends up being surprisingly difficult. + +Consider the example we used earlier: 10,000 tests, with 1% of them flaky, each failing 1% of +the time. These are arbitrary numbers but pretty representative of what you will likely find +in the wild + +1. If someone adds a new test case, in order to have a 95% confidence that it is not flaky, + you would need to run it about 300 times (`log(0.05) / log(0.99)`). + +2. Even if we do run every new test 300 times, that 1 in 20 flaky tests will still slip through, + and over time will still build up into a population of flaky tests actively causing flakiness + in your test suite + +3. Furthermore, many tests are not flaky alone! Running the same test 300 times in + isolation may not demonstrate any flakiness, since e.g. the test may only be flaky when + run in parallel with another test due to <> or <>, + or in a specific order after other tests due to <>. + +4. Lastly, it is not only new tests that are flaky! 
When I was working on this area at Dropbox
+   and Databricks, the majority of flaky tests we detected were existing tests that
+   were stable for days/weeks/months before turning flaky (presumably due to a code change
+   in the application code or test code). Blocking new tests that are flaky does nothing
+   to prevent the code changes causing old tests to become flaky!
+
+To block code changes that cause either new or old tests to become flaky, we would need
+to run every single test about 300 times on each pull request, to give us 95% confidence that
+each 1% flaky test introduced by the code change would get caught. This is prohibitively
+slow and expensive, causing a test suite that may take 5 minutes to run costing $1 to instead
+take 25 hours to run costing $300.
+
+In general, it is very hard to block flaky tests "up front". You have to accept that
+over time some parts of your test suite will become flaky, and then make plans on how
+to respond and manage those flaky tests when they inevitably appear.
+
+## Managing Flaky Tests
+
+Once flaky tests start appearing in your test suite, you need to do something about them.
+This generally involves (a) noticing that flaky tests exist, (b) identifying which tests
+are flaky, and (c) mitigating those specific problematic tests to prevent them from
+causing pain to your developers.
+
+### Monitor Flaky Tests Asynchronously
+
+As mentioned earlier, <>.
+Thus, you must assume that flaky tests _will_ make their way into your test suite,
+and monitor the flakiness when it occurs. This can be done in a variety of ways, for example:
+
+1. Most CI systems allow manual retries, and developers usually retry tests they suspect are
+   flaky. If a test fails once then passes when retried on the same version of the code, it
+   was a flaky failure. This is the metric we used in
+   https://www.databricks.com/blog/2021/10/14/developing-databricks-runbot-ci-solution.html[Databricks' CI system]
+   to monitor the flaky test numbers.
+ +2. Some CI systems or test frameworks have automatic retries: e.g. in https://github.com/dropbox/changes[Dropbox's Changes CI system] + all tests were retried twice by default. If a test fails initially and then + passes on the retry, it is flaky: the fact that it's non-deterministic means that + next time, it might fail initially and then fail on the retry! + +3. Most CI systems run tests to validate code changes before merging, and then run tests + again to validate the code post-merge. Post-merge should "always" be green, but sometimes + suffers breakages or flakiness. If a test passes, fails, then passes on three consecutive + commit test runs post-merge, it is likely to be flaky. Breakages + tend to cause a string of consecutive test failures before being fixed or reverted, and + very rarely get noticed and dealt with immediately + +Notably, most test failures when validating code changes (e.g. on pull requests) are not useful +here: tests are _meant_ to break when validating code changes in order to catch problems! +Hence the need for the slightly-roundabout ways above to determine what tests are flaky, +by looking for failures at times when you wouldn't expect failures to occur. + +Once you have noticed a test is flaky, there are two main options: retries and quarantine + +### Retrying Flaky Tests + +Retries are always controversial. A common criticism is that they can mask real flakiness +in the system that can cause real problems to customers, which is true. However, +we already discussed why we xref:_do_not_block_code_changes_on_flaky_tests[should +not block code changes on flaky tests], since doing so just causes pain while +not being an effective way of getting the flakiness fixed. + +Furthermore, developers +are going to be manually retrying flaky tests anyway: whether by restarting the job +validating their pull request, or running the test manually on their laptop or devbox +to check if it's truly broken. 
Thus, we should feel free to add automatic retries around +flaky tests to automate that tedious manual process. + +Retrying flaky tests can be surprisingly effective. As mentioned earlier, even +infrequently flaky tests can cause issues, with a small subset of tests flaking +1% of the time being enough to block all progress. However, one retry +turns it into a 0.01% flaky test, and two retries turns it into a 0.0001% flaky test. +So even one or two retries is enough to make most flaky tests stable enough to not cause issues. + +Retrying flaky tests has two weaknesses: + +#### Retries can be expensive for real failures + +If you retry a test twice, that +means that an actually-failed test would run three times before giving up. +If you retry every test by default, and a code change breaks a large number of +them, running all those failing tests three times can be a significant performance +and latency penalty + +To mitigate this, you should generally avoid "blanket" retries, and only add +retries around specific tests that you have detected as being flaky + +#### Retries may not work if not coarse grained enough + +For example, if `test_a` fails +due to interference with `test_b` running concurrently, retrying `test_a` +immediately while `test_b` is still running will fail again. Or if the flakiness is +due to some bad state on the filesystem, the test may continue flaking until +it is run on a completely new machine with a clean filesystem. + +This failure mode can be mitigated by retrying the failed tests only after the +entire test suite has completed, possibly on a clean test machine. + +### Auto-Quarantining Flaky Tests + +Quarantine involves detecting that a test is flaky, and simply not counting it when deciding +whether or not to accept a code change for merge or deployment. + +This is much more aggressive than retrying flaky tests, as even real breakages will get +ignored for quarantined tests. 
You effectively lose the test coverage given by a particular
+test for the period while it is quarantined. Only when someone eventually fixes the flaky test
+can it be removed from quarantine and can begin running and blocking code changes again.
+
+Quarantining is best automated, both to remove busy-work of finding/quarantining
+flaky tests, and to avoid the inevitable back-and-forth between the people
+quarantining the tests and the people whose tests are getting quarantined.
+
+
+#### Why Quarantine?
+
+The obvious value of quarantining flaky tests is that it unblocks merging of code changes
+by ignoring flaky tests that are probably not relevant. Quarantine basically automates what
+people do manually in the presence of flaky tests anyway:
+
+* When enough tests are flaky, eventually developers are going to start merging/deploying code
+  changes despite the failures being present, because getting a "fully green" test run is
+  impossible
+
+* When that happens, the developer is not going to be able to tell whether the failure
+  is flaky or real, so if a code change causes a real breakage in that test the
+  developer is likely going to not notice and merge/deploy it anyway!
+
+So although naively it seems like quarantining flaky tests costs you test coverage, in
+reality it costs you nothing and simply automates the loss of coverage that you are going
+to suffer anyway. It simply saves a lot of manual effort in having your developers manually
+deciding which test failures to ignore based on what tests they remember to be flaky, since
+now the quarantine system remembers the flaky tests and ignores them on your behalf.
+
+#### Why Quarantine?
Part 2
+
+The non-obvious value of quarantining flaky tests is that it aligns incentives across a
+development team or organization:
+
+* Normally, a flaky test means the test owner continues to benefit from the test coverage while
+  other teams suffer from the flakiness
+
+* With auto-quarantine, a flaky test means the test owner both benefits from the test coverage for
+  healthy tests and suffers the lack of coverage caused by their flaky test being quarantined.
+
+This aligning of incentives means that with auto-quarantine enabled, the normal
+endless discussions and disputes about flaky tests tend to disappear. The test owner
+can decide themselves how urgently they need to fix a quarantined flaky test, depending
+on how crucial that test coverage is, or even if they should fix it at all! Other teams
+are not affected by the quarantined flaky test, and do not care what the test owner ends
+up deciding.
+
+Most commonly, quarantining is automatic, while un-quarantining a test can be automatic or manual.
+Due to the non-deterministic nature of flakiness, it's often hard to determine whether a flaky
+test has been truly fixed or not, but it turns out it doesn't matter. If you try to fix a test,
+take it out of quarantine, and it turns out to be still flaky, the auto-quarantine system will
+just put it back into quarantine for you to take another look at it.
+
+## Implementing Flaky Test Management Systems
+
+So far, all the discussion in this article has been at a high level.
Exactly how to implement +it is left as an exercise to the reader, but is usually a mix of: + +* `retry{}` helpers in multiple languages you can sprinkle through your test code where necessary +* A SQL database storing historical test results and retries +* A SQL database or a text file committed in-repo to track quarantined tests +* A service that looks at historical test results and retries and decides when/if to quarantine a test +* Tweaks to your existing CI system to be able to work with all of the above: ignoring quarantined + tests, tracking retry counts, tracking test results, etc. +* Some kind of web interface giving you visibility into all the components and workflows above, + so when things inevitably go wrong you are able to figure out what's misbehaving + +Usually flaky test management starts off as an entirely manual process, which works fine for small +projects. But as the size of the project grows, you inevitably need to augment the manual work +with some basic automation, and over time build out a fully automated system to do what you want. +So far I have not seen a popular out-of-the-box solution for this, and in my interviews with ~30 +silicon valley companies it seems everyone ends up building their own. The +https://github.com/dropbox/changes[Dropbox CI System] and +https://www.databricks.com/blog/2021/10/14/developing-databricks-runbot-ci-solution.html[Databricks CI System] +I worked on both had their flaky test management bespoke and built in to the infrastructure. + +None of the techniques discussed in this article are rocket science, and the challenge is mostly +just plumbing the necessary data back and forth between different parts of your CI system. But +hopefully this high-level discussion of how to manage flaky tests should give you a head start, +and save you the weeks or months it would take to learn the same +things that I have learned working on flaky tests over the past decade. 
diff --git a/blog/modules/ROOT/pages/5-executable-jars.adoc b/blog/modules/ROOT/pages/5-executable-jars.adoc new file mode 100644 index 00000000000..37c6d598287 --- /dev/null +++ b/blog/modules/ROOT/pages/5-executable-jars.adoc @@ -0,0 +1,370 @@ +// tag::header[] + +# How JVM Executable Assembly Jars Work + + +:author: Li Haoyi +:revdate: 2 January 2025 +_{author}, {revdate}_ + +include::mill:ROOT:partial$gtag-config.adoc[] + +One feature of the https://mill-build.org[Mill JVM build tool] is that the +assembly jars it creates are directly executable: + +```bash +> ./mill show foo.assembly # generate the assembly jar +"ref:v0:bd2c6c70:/Users/lihaoyi/test/out/foo/assembly.dest/out.jar" + +> out/foo/assembly.dest/out.jar # run the assembly jar directly +Hello World +``` + +Other JVM build tools also can generate assemblies, but most need you to run them +via `java -jar` or `java -cp`, +or require you to use https://docs.oracle.com/en/java/javase/11/tools/jlink.html[jlink] or +https://docs.oracle.com/en/java/javase/17/docs/specs/man/jpackage.html[jpackage] +which are much more heavyweight and troublesome to set up. Mill automates that, and while not +groundbreaking, it is a nice convenience that makes your JVM +code built with Mill fit more nicely into command-line centric workflows common in modern +software systems. 
+
+This article will discuss how Mill's executable assemblies are implemented, so perhaps
+other build tools and toolchains will be able to provide the same convenience.
+
+// end::header[]
+
+## Trying Out Mill's Executable Assemblies
+
+To try out Mill's executable assembly jars, you can reproduce the above steps
+with the following code and config:
+
+```scala
+// build.mill
+import mill._, javalib._
+
+object foo extends JavaModule
+```
+
+```java
+// foo/src/Foo.java
+package foo;
+
+public class Foo{
+    public static void main(String[] args){
+        System.out.println("Hello World");
+    }
+}
+```
+
+```bash
+> ./mill show foo.assembly
+"ref:v0:bd2c6c70:/Users/lihaoyi/test/out/foo/assembly.dest/out.jar"
+
+> /Users/lihaoyi/test/out/foo/assembly.dest/out.jar
+Hello World
+```
+
+Mill's ``JavaModule``s come with `.assembly` tasks built in by default, without needing
+to install plugins as is necessary in other build tools like
+https://maven.apache.org/plugins/maven-assembly-plugin/usage.html[Maven] or
+https://github.com/sbt/sbt-assembly[SBT].
+
+While the above example is a trivial single-module project, this also works for more complicated
+projects with multiple modules and third-party dependencies. The assembly jar will aggregate
+the code from all upstream modules and dependencies into a single `.jar` file that you can then
+execute from the command line.
+
+Most `.jar` files are not directly executable, hence the need for a `java -jar` or `java -cp`
+command to run them. To understand how Mill makes direct execution
+possible, we first need to understand what a `.jar` file is.
+
+## What is an Assembly Jar?
+
+An "assembly" jar is just a jar file that includes all transitive dependencies.
+What makes an assembly different from a "normal" jar is that it should (in theory) contain
+everything needed to run your JVM program.
In contrast, most "normal" jars do not contain +their dependencies, and you need to separately go download those dependencies and pass them in +via `-classpath`/`-cp` before you can run your Java program. + +One thing that many people don't know is that jar files are just zip files. You can see +that from the command line, where although you normally use `jar tf` to list the contents +of a `.jar` file, `unzip -l` works as well: + +```bash +> jar tf /Users/lihaoyi/test/out/foo/assembly.dest/out.jar +META-INF/MANIFEST.MF +META-INF/ +foo/ +foo/Foo.class +``` +```bash +> unzip -l /Users/lihaoyi/test/out/foo/assembly.dest/out.jar +Archive: /Users/lihaoyi/test/out/foo/assembly.dest/out.jar +warning [/Users/lihaoyi/test/out/foo/assembly.dest/out.jar]: 203 extra bytes at beginning or within zipfile + (attempting to process anyway) + Length Date Time Name +--------- ---------- ----- ---- + 110 01-02-2025 12:05 META-INF/MANIFEST.MF + 0 01-02-2025 12:05 META-INF/ + 0 01-02-2025 12:05 foo/ + 415 01-02-2025 12:05 foo/Foo.class +--------- ------- + 525 4 files +``` + +In this case, the example project only has one `Foo.java` source file, compiled into a single +`Foo.class` JVM class file. Larger projects will have multiple class files, including those +from upstream modules and third-party dependencies. + +In addition to the compiled class files, jars also can contain metadata. 
For example, we can see +this generated `out.jar` contains a `META-INF/MANIFEST.MF` file, which contains some basic +metadata including the `Main-Class: foo.Foo` which is the entrypoint of the Java program: + +```bash +$ unzip -p /Users/lihaoyi/test/out/foo/assembly.dest/out.jar META-INF/MANIFEST.MF +warning [/Users/lihaoyi/test/out/foo/assembly.dest/out.jar]: 203 extra bytes at beginning or within zipfile + (attempting to process anyway) +Manifest-Version: 1.0 +Created-By: Mill 0.12.4-23-2ff492 +Tool: Mill-0.12.4-23-2ff492 +Main-Class: foo.Foo +``` + +The `warning: 203 extra bytes at beginning or within zipfile` is a hint that although +this is a valid zip file, something unusual is going on, which leads us to the trick +that we use to make the `out.jar` file executable + +## What is a Zip file? + +A https://en.wikipedia.org/wiki/ZIP_(file_format)[zip file] is an archive made of multiple smaller files, individually compressed, +concatenated together followed by a "central directory" containing the _reverse offsets_ of +every file within the archive, relative to the central directory. + +```graphviz +digraph G { + label="archive.zip" + node [shape=box width=0 height=0 style=filled fillcolor=white] + zip [shape=record label=" Foo.class | MANIFEST.MF | ...other files... | central directory"] + zip:f2:n -> zip:f1:n [label="reverse offsets"] + zip:f2:n -> zip:f0:n + zip:f2:n -> zip:f3:n +} +``` + +The typical way someone reads from a zip file is a follows: + +* Seek to the end of zip and find the central directory +* Find the metadata containing the offset for the file you want +* Seek backwards using that offset to the start of the entry you want +* Read and decompress your entry + +Unlike `.tar.gz` files, the entries within a `.zip` file are compressed individually. This +is convenient for use cases like Java classfiles where you want to lazily load them +individually on-demand without having to first decompress the whole archive up front. 
+ +## Executable Zip Archives + +One quirk of the above Zip format is that _the zip data does not need to start at the +beginning of the file_! The zip data can be at the end of an arbitrarily long file, and +as long as programs can scan to the end of the zip to find the central directory, they +will be able to extract the zip. + +```graphviz +digraph G { + node [shape=box width=0 height=0 style=filled fillcolor=white] + label="archive.zip" + extra_label:s -> zip:fe:n [color=red penwidth=3] + extra_label [color=white style=invisible] + zip [shape=record label=" ...extra data... | Foo.class | MANIFEST.MF | ...other files... | central directory"] + zip:f2:n -> zip:f1:n + zip:f2:n -> zip:f0:n + zip:f2:n -> zip:f3:n +} +``` + +Thus, we can actually use the `.zip` format in two ways: + +1. As a `.zip` file, which is read and extracted starting from the end of the file on the right +2. As something else, such as a bash script, which is read and executed starting from start of the file on the left + +This technique is used in common Zip +https://en.wikipedia.org/wiki/Self-extracting_archives[self-extracting archives], where +a short bash script is pre-pended to the zip archive that when run extracts the archive using +`unzip`. Although +this article is about Jars, `.jar` files are really just ``.zip``s with a different name! 
+So we can prepend a bash script to our `.jar` file to + +* Run `java` with the current executable `"$0"` as the classpath +* Pass any of the current executable's command-line arguments `"$@"`as the Java program's command-line arguments +* Allow configuration of the `java` process (since we're no longer calling it ourselves) via a `JAVA_OPTS` environment variable + +```graphviz +digraph G { + label="out.jar" + left [shape=plaintext label="bash script starts executing at start of file\nruns `java` passing itself as the classpath"] + right [shape=plaintext label="`java` loads compiled classfiles from jar/zip\nby reading the central directory at end of file"] + + node [shape=box width=0 height=0 style=filled fillcolor=white] + zip [shape=record label=" exec java $JAVA_OPTS -cp \"$0\" 'foo.Foo' \"$@\" | Foo.class | MANIFEST.MF | ...other files... | central directory"] + zip:f2:n -> zip:f1:n + zip:f2:n -> zip:f0:n + zip:f2:n -> zip:f3:n + left -> zip:fe:n [color=red penwidth=3] + zip:f2:s -> right [dir=back color=red penwidth=3] +} +``` + +If you use `less out.jar` to look at what's inside the Jar file, it looks like this: + +```bash +exec java $JAVA_OPTS -cp "$0" 'foo.Foo' "$@" +PK^C^D^T^@^H^H^H^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^T^@^Q^@META-INF/MANIFEST.MFUT^M^@^G<97>^Pvg<97>^Pvg<97>^Pvgeɱ +<80> ^P^@^?^_81s1-^OR^P^CuESC^Z{<8B>JNcҕ(<.=L7<8F>XjE^W^]ٕln +N<91>%3ri^T*<8F>1<8B>CD<81><82>^WPK^G^HB?^Xo[^@^@^@n^@^@^@PK^C^D +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@ ^@^Q^@META-INF/UT^M^@^G<97>^Pvg<97>^Pvg<97>^PvgPK^C^D +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^D^@^Q^@foo/UT^M^@^G<97>^Pvg<97>^Pvg<97>^PvgPK^C^D^T^@^H^H^H^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^M^@^Q^@foo/Foo.classUT^M^@^G<97>^Pvg<97>^Pvg<97>^Pvgm<90>J@^T<86>[<9A>4z-BH]<<98>^G^Q<8A><8B>B.\XL2^R^S҅<82>^K^_<87>^R^DA<85>^C<^C ^NJ([Fh-WQ2/'^K1^Hc<99><94>P^FcESCu^V^^\^W^M!^S1S:gQ7(~R<99>da<96><8A>(^^ֱJh<9C>^KލNA^Kk^V.:X't<96><88>^HֽT®^gap0cNk^?5rg<82>^Ld".x"hxR<89>#&=v<99>^K u<<9E>NH^Z^G^F>|#J s%<8E>ESC9^SESC<99>^K&Z1,^V^?ЃB 
+/+hN]^ZNeESCPK^G^H<94>r+6 ^A^@^@<9F>^A^@^@PK^A^B^T^@^T^@^H^H^H^@`"ZB?^^Xo[^@^@^@n^@^@^@^T^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@^@META-INF/MANIFEST.MFUT^E^@^G<97>^PvgPK^A^B +^@ +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@ ^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@META-INF/UT^E^@^G<97>^PvgPK^A^B +^@ +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^D^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@foo/UT^E^@^G<97>^PvgPK^A^B^T^@^T^@^H^H^H^@`"Z<94>r+6 ^A^@^@<9F>^A^@^@^M^@ ^@^@^@^@^@^@^@^@^@^@^@^Y^A^@^@foo/Foo.classUT^E^@^G<97>^PvgPK^E^F^@^@^@^@^D^@^D^@ +^A^@^@<85>^B^@^@^@^@ +/Users/lihaoyi/test/out/foo/assembly.dest/out.jar (END) +``` + +Here, you can see a single line of `exec java $JAVA_OPTS -cp "$0" 'foo.Foo' "$@"` which +is the bash script we prepended to the zip, followed by the un-intelligible compressed +class file data that makes up the `.jar`. Since now you are running the Java program +via `./out.jar` instead of `java -jar`, we expose the `JAVA_OPTS` environment variable +as a way to pass flags to the `java` command that ends up being run.] + +## What about Windows? + +The self-executing jar file above works by prepending a shell script. This works on Unix +environments like Linux or Mac, but not on the Windows machines which are also very common. + +To fix this, we can replace our shell script zip prefix with a "universal" script that +is both a valid `.sh` program as well as valid `.bat` program, the latter being the +standard windows command line language. Thus, instead of: + +```bash +exec java $JAVA_OPTS -cp "$0" 'foo.Foo' "$@" +``` + +We can instead use: + +```bash +@ 2>/dev/null # 2>nul & echo off & goto BOF +: +exec java $JAVA_OPTS -cp "$0" 'foo.Foo' "$@" +exit + +:BOF +setlocal +@echo off +java %JAVA_OPTS% -cp "%~dpnx0" foo.Foo %* +endlocal +exit /B %errorlevel% +``` + + +This universal launcher script is worth digging into. 
+ +In a `sh` shell: + +* `@ 2>/dev/null # 2>nul & echo off & goto BOF` is an invalid command, but we ignore + the error because we pipe it to `/dev/null` + +* It then runs the `exec java -cp` command + +* We `exit` the script before we hit the invalid shell code below + +In a `bat` environment: + +* We run the first line, doing nothing, until we hit `goto BOF`. This jumps over the `exec java` + line which is not valid `bat` code, to go straight to the `:BOF` label + +* We then run `java -cp`, but with slightly different syntax from the unix/shell version above + (e.g. `%~dpnx0` instead of `$0`) for windows/bat compatibility + +* We then `exit` the script, using `/B %errorlevel%` which is the windows syntax for propagating + the exit code, before we hit the compressed data below which is not valid `bat` code. + +As a result, we have a short script that we can call either from `sh` or `bat`, +that forwards arguments and the script itself (which is also a `.jar` file) to `java -cp`, +and then forwards the exit code back from `java -cp` to the caller. Although the script may +look fragile, the strong backwards compatibility of `.sh` and `.bat` scripts means that +once working it is unlikely to break in future versions of Mac/Linux/Windows. 
+ +If we look at the file using `less -n20`, we can now see our universal launcher script +pre-pended to the blobs of compressed classfile data that make up the rest of the jar: + +```bash +@ 2>/dev/null # 2>nul & echo off & goto BOF +: +exec java $JAVA_OPTS -cp "$0" 'foo.Foo' "$@" +exit + +:BOF +setlocal +@echo off +java %JAVA_OPTS% -cp "%~dpnx0" foo.Foo %* +endlocal +exit /B %errorlevel% +PK^C^D^T^@^H^H^H^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^T^@^Q^@META-INF/MANIFEST.MFUT^M^@^G<97>^Pvg<97>^Pvg<97>^Pvgeɱ +<80> ^P^@^?^_81s1-^OR^P^CuESC^Z{<8B>JNcҕ(<.=L7<8F>XjE^W^]ٕln +N<91>%3ri^T*<8F>1<8B>CD<81><82>^WPK^G^HB?^Xo[^@^@^@n^@^@^@PK^C^D +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@ ^@^Q^@META-INF/UT^M^@^G<97>^Pvg<97>^Pvg<97>^PvgPK^C^D +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^D^@^Q^@foo/UT^M^@^G<97>^Pvg<97>^Pvg<97>^PvgPK^C^D^T^@^H^H^H^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^M^@^Q^@foo/Foo.classUT^M^@^G<97>^Pvg<97>^Pvg<97>^Pvgm<90>J@^T<86>[<9A>4z-BH]<<98>^G^Q<8A><8B>B.\XL2^R^S҅<82>^K^_<87>^R^DA<85>^C<^C ^NJ([Fh-WQ2/'^K1^Hc<99><94>P^FcESCu^V^^\^W^M!^S1S:gQ7(~R<99>da<96><8A>(^^ֱJh<9C>^KލNA^Kk^V.:X't<96><88>^HֽT®^gap0cNk^?5rg<82>^Ld".x"hxR<89>#&=v<99>^K u<<9E>NH^Z^G^F>|#J s%<8E>ESC9^SESC<99>^K&Z1,^V^?ЃB +/+hN]^ZNeESCPK^G^H<94>r+6 ^A^@^@<9F>^A^@^@PK^A^B^T^@^T^@^H^H^H^@`"ZB?^^Xo[^@^@^@n^@^@^@^T^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@^@META-INF/MANIFEST.MFUT^E^@^G<97>^PvgPK^A^B +^@ +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@ ^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@META-INF/UT^E^@^G<97>^PvgPK^A^B +^@ +^@^@^@^@^@`"Z^@^@^@^@^@^@^@^@^@^@^@^@^D^@ ^@^@^@^@^@^@^@^@^@^@^@^@^@^@foo/UT^E^@^G<97>^PvgPK^A^B^T^@^T^@^H^H^H^@`"Z<94>r+6 ^A^@^@<9F>^A^@^@^M^@ ^@^@^@^@^@^@^@^@^@^@^@^Y^A^@^@foo/Foo.classUT^E^@^G<97>^PvgPK^E^F^@^@^@^@^D^@^D^@ +^A^@^@<85>^B^@^@^@^@ +/Users/lihaoyi/test/out/foo/assembly.dest/out.jar (END) +``` + +We can run it directly on Mac/Linux: + +```bash +> ./mill show foo.assembly # generate the assembly jar +"ref:v0:bd2c6c70:/Users/lihaoyi/test/out/foo/assembly.dest/out.jar" + +> out/foo/assembly.dest/out.jar # run the assembly 
jar directly +Hello World +``` + +And we can run it on windows, although we need to rename +`out.jar` to `out.bat` before executing it: + +```bash +> ./mill show foo.assembly +"ref:v0:bd2c6c70:C:\\Users\\haoyi\\test\\out\\foo\\assembly.dest\\out.jar" + +> cp out\foo\assembly.dest\out.jar out.bat + +> ./out.bat +Hello World +``` + +## Conclusion + +The executable assembly jars that Mill generates are very convenient; it means that +you can use Mill to compile (almost) any Java program into an executable you can run with +`./out.jar`, as long as you have the appropriate version of Java globally installed. This +is much easier than setting up JLink or JPackage. You can even have an executable jar that +runs on all of Mac/Linux/Windows just by carefully crafting a launcher script that runs +on all platforms. + +The Mill JVM build tool provides these executable assembly jars out-of-the-box, the SBT +build tool as part of the https://github.com/sbt/sbt-assembly[SBT Assembly] plugin, +via the `prependShellScript` config. +Maven and Gradle do not provide this by default but it is pretty easy to set up yourself +simply by concatenating a shell script with an assembly jar, as described above. + +Although running Java programs via +`java -jar` or `java -cp` is not a huge hardship, removing that friction really helps your +Java programs and codebase feel like a first class citizen on the command-line. + diff --git a/blog/modules/ROOT/pages/6-garbage-collector-perf.adoc b/blog/modules/ROOT/pages/6-garbage-collector-perf.adoc new file mode 100644 index 00000000000..559c0c43dc3 --- /dev/null +++ b/blog/modules/ROOT/pages/6-garbage-collector-perf.adoc @@ -0,0 +1,804 @@ +// tag::header[] + +# Understanding JVM Garbage Collector Performance + + +:author: Li Haoyi +:revdate: 10 January 2025 +_{author}, {revdate}_ + +include::mill:ROOT:partial$gtag-config.adoc[] + +Garbage collectors are a core part of many programming languages. 
While they generally work +well, on occasion when they go wrong they can fail in very unintuitive ways. +This article will discuss the fundamental design of how garbage collectors +work, and tie it to real benchmarks of how GCs perform on the Java Virtual Machine. You +should come away with a deeper understanding of how the JVM garbage collector works and +concrete ways you can work to improve its performance in your own real-world projects. + +// end::header[] + +For TL,DR see the <> section at the end. + +## A Theoretical Garbage Collector + +To understand how real-world JVM garbage collectors works, it is best to start +by looking at a simple example garbage collector. This will both give an intuition +for how things work in general, and also help you notice when things diverge from this +idealized example. + +### Process Memory + +At its core, a garbage collector helps manage the free memory of a program, often called the +_heap_. The memory of a program can be modelled as a linear sequence of storage locations, e.g. +below where we have 16 slots in memory: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | | | | | | | | | | | | | | | | "] + + heap:f0:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] +} +``` + +These storage locations can contain objects (below named `foo`, `bar`, `qux`, `baz`) that take +up memory and may reference other objects (solid arrows). Furthermore, the values may be referenced from outside +the heap (dashed lines), e.g. from the "stack" which represents +local variables in methods that are currently being run (shown below) or from static global +variables (not shown). We keep a `free-memory` pointer to the first empty slot on the right. 
+ + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | foo | bar | qux | baz | | | | | | | | | | | | "] + heap:f0:s -> heap:f1:s + heap:f0:s -> heap:f2:s + heap:f2:n -> heap:f3:n + heap:f4:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f0 -> heap:f1 [dir=none, style=dashed] + stack:f1 -> heap:f2 [dir=none, style=dashed] +} +``` + +If we want to allocate a new object `new1`, we can simply put it at the location of +the `free-memory` pointer (green below), and bump the pointer 1 slot to the right + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | foo | bar | qux | baz | new1 | | | | | | | | | | | "] + heap:f0:s -> heap:f1:s + heap:f0:s -> heap:f2:s + heap:f2:n -> heap:f3:n + heap:f5:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f0 -> heap:f1 [dir=none, style=dashed] + stack:f1 -> heap:f2 [dir=none, style=dashed] + stack:f2 -> heap:f4 [dir=none, style=dashed, color=green, penwidth=3] +} +``` + +Similarly, objects may stop being referenced, e.g. `bar` below no longer has a reference +pointing at it from the stack. 
This may happen because a local variable on the stack is +set to `null`, or because a method call returned and the local variables associated with +it are no longer necessary + + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | foo | bar | qux | baz | new1 | | | | | | | | | | | "] + heap:f0:s -> heap:f1:s + heap:f0:s -> heap:f2:s + heap:f2:n -> heap:f3:n + heap:f5:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f1 -> heap:f2 [dir=none, style=dashed] + stack:f2 -> heap:f4 [dir=none, style=dashed] +} +``` + +For the purposes of this example, we show all objects on the heap taking up 1 slot, but +in real programs the size of each object may vary depending on the fields it has +or if it's a variable-length array. + +### A Simple Garbage Collector + +The simplest kind of garbage collector splits the 16-slot heap we saw earlier into +two 8-slot halves. If we want to allocate 4 more objects (`new2`, to `new5`), but +there are only 3 slots left in that half of the heap, we will need to do a collection: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | | | | | | | | "] + + heap1 [shape=record label="HALF1 | foo | bar | baz | qux | new1 | | | "] + heap1:f0:s -> heap1:f1:s + heap1:f0:s -> heap1:f2:s + heap1:f2:n -> heap1:f3:n + heap1:f5:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f1 -> heap1:f2 [dir=none, style=dashed] + stack:f2 -> heap1:f4 [dir=none, style=dashed] +} +``` + +To do a collection, the GC first starts from all non-heap +references (e.g. the `STACK` references above) often called "GC roots". 
It then traces +the graph of references, highlighted red below: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | | | | | | | | "] + + heap1 [shape=record label="HALF1 | foo | bar | qux | baz | new1| | | "] + heap1:f0:s -> heap1:f1:s + heap1:f0:s -> heap1:f2:s + heap1:f2:n -> heap1:f3:n [color=red, penwidth=3] + heap1:f5:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + stack [shape=record label="STACK | | | | | | | | "] + stack:f1 -> heap1:f2 [dir=none, style=dashed, color=red, penwidth=3] + stack:f2 -> heap1:f4 [dir=none, style=dashed, color=red, penwidth=3] +} +``` + +Here, we can see that `foo` is not referenced ("garbage"), `qux` and `new1` are referenced directly from the +`STACK`, and `baz` is referenced indirectly from `qux`. `bar` is referenced by `foo`, but +because `foo` is itself garbage we can count `bar` as garbage as well. + +We then copy all objects we traced (often called the _live-set_) from `HALF1` to `HALF2`, adjust all the references +appropriately. Now `HALF2` is the half of the heap in use, and `HALF1` can be reset to empty. 
+ + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | qux | baz | new1 | | | | | "] + + heap1 [shape=record label="HALF1 | | | | | | | | "] + heap2:f0:s -> heap2:f1:s [color=red, penwidth=3] + + heap2:f2:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + stack [shape=record label="STACK | | | | | | | | "] + stack:f0 -> heap2:f0 [dir=none, style=dashed, color=red, penwidth=3] + stack:f1 -> heap2:f2 [dir=none, style=dashed, color=red, penwidth=3] +} +``` + +This collection has freed up 5 slots, so we now have space to allocate the +4 `new2` to `new5` objects we wanted (green) starting from our `free-memory` pointer: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | qux | baz | new1 | new2 | new3 | new4 | new5 | "] + + heap1 [shape=record label="HALF1 | | | | | | | | "] + heap2:f0:s -> heap2:f1:s + heap2:f3:n -> heap2:f4:n [color=green, penwidth=3] + heap2:f4:s -> heap2:f5:s [color=green, penwidth=3] + + heap2:f7:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] + stack [shape=record label="STACK | | | | | | | | "] + stack:f0 -> heap2:f0 [dir=none, style=dashed] + stack:f1 -> heap2:f2 [dir=none, style=dashed] + stack:f2 -> heap2:f3 [dir=none, style=dashed, color=green, penwidth=3] + stack:f6 -> heap2:f6 [dir=none, style=dashed, color=green, penwidth=3] +} +``` + +You may notice that the objects `foo` and `bar` disappeared. This is because `foo` and `bar` +were not referenced directly or indirectly by any GC roots: they were unreachable, and thus +considered "garbage". These garbage objects were +not explicitly deleted, but simply did not get copied over from `HALF1` to `HALF2` +during collection, and thus were wiped out when `HALF1` was cleared. 
+ +As your program executes, the methods actively running may change, and thus the references +(both from stack to heap and between entries on your heap) may change. For example, we may +stop referencing `qux`, which also means that `baz` is now unreachable: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | qux | baz | new1 | new2 | new3 | new4 | new5 | "] + + heap1 [shape=record label="HALF1 | | | | | | | | "] + heap2:f0:s -> heap2:f1:s + alloc [label = "free memory", shape=plaintext] + heap2:f7:s -> alloc:n [dir=back, style=dotted] + heap2:f3:n -> heap2:f4:n + heap2:f4:s -> heap2:f5:s + stack [shape=record label="STACK | | | | | | | | "] + stack [shape=record label="STACK | | | | | | | | "] + + stack:f1 -> heap2:f2 [dir=none, style=dashed] + stack:f2 -> heap2:f3 [dir=none, style=dashed] + stack:f6 -> heap2:f6 [dir=none, style=dashed] + + +} +``` + +Although `qux` and `baz` are now "garbage", they still take up space in the heap. Thus, if we want +to allocate two new objects (e.g. `new6` and `new7`), and there is only one slot left on the heap (above), +we need to repeat the garbage collection process: tracing +the objects transitively reachable (`new1`, `new2`, `new3`, `new4`, `new5`), copying them +from `HALF2` to `HALF1`, +adjusting any references to now use `HALF1` as the new heap, and clearing anything that was left +behind in `HALF2`. 
This then gives us enough space to allocate `new6` and `new7` (below in green): + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + {rank=same; heap1; heap2} + heap2 [shape=record label="HALF2 | | | | | | | | "] + + heap1 [shape=record label="HALF1 | new1 | new2 | new3 | new4 | new5 | new6 | new7 | "] + heap1:f1:n -> heap1:f2:n [color=red, penwidth=3] + heap1:f2:s -> heap1:f3:s [color=red, penwidth=3] + heap1:f5:s -> heap1:f6:s [color=green, penwidth=3] + + stack [shape=record label="STACK | | | | | | | | "] + + stack:f0 -> heap1:f0 [dir=none, style=dashed, color=red, penwidth=3] + stack:f1 -> heap1:f1 [dir=none, style=dashed, color=red, penwidth=3] + + stack:f4 -> heap1:f4 [dir=none, style=dashed, color=red, penwidth=3, constraint=false] + stack:f5 -> heap1:f5 [dir=none, style=dashed, color=green, penwidth=3] + + heap1:f7:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] +} +``` + +This process can repeat as many times as necessary: as long as there are _some_ objects +that are unreachable, you can run a collection and copy the "live" objects to the other +half of the heap, freeing up some space to allocate new objects. The only reason this +may fail is that if you run a collection and there _still_ isn't enough space to allocate +the objects you want; that means your program has run out of memory, and will fail with +an `OutOfMemoryError` or similar. + +Even this simplistic GC has a lot of interesting properties, and you may have heard these +terms or labels that can apply to it: + +* *semi-space* garbage collector, because of the way it splits the heap into two halves + +* *copying* garbage collector, because it needs to copy the heap objects back and forth + between `HALF1` and `HALF2` + +* *tracing* garbage collector, because of the way it traverses the graph of heap + references in order to decide what to copy. 
+ +* *stop the world* garbage collector, because while this whole trace-copy-update-references + workflow is happening, we have to stop the program to avoid race conditions between the garbage + collector and the program code. + +* *compacting* garbage collector, because every time we run a GC, we copy everything to the + left-most memory, avoiding the memory fragmentation that occurs with other memory + management techniques such as https://en.wikipedia.org/wiki/Reference_counting[Reference Counting]. + +Most modern GCs are considerably more complicated than this: e.g. they may have optimizations +to avoid wasting half the heap by leaving it empty, or they may have +xref:_generational_optimizations[optimizations for handling short-lived objects], but at +their heart this is still what they do. And understanding the performance characteristics of +this simple, naive GC can help give you an intuition in how GCs compare to other memory management +strategies, and how modern GCs behave in terms of performance. + + + +## Compared to Reference Counting + +https://en.wikipedia.org/wiki/Reference_counting[Reference Counting] is another popular +memory management strategy that Garbage Collection is often compared to. Reference counting +works by keeping track of how many incoming references each object has, and when that +number reaches zero the object can be collected. This approach has a few major differences +from that of a tracing GC. 
We discuss a few of them below: + +### Reference counting does not compact the heap + +Program that use reference +counting tend to find their heap getting more and more fragmented over time +We can see this in the heap diagrams: the tracing garbage collector heaps above always had a +single block of empty space to the right, and had the `new` objects allocated in ascending order +from left-to-right: + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap1 [shape=record label="HEAP | new1 | new2 | new3 | new4 | new5 | | | | | | | | | | | "] + + stack [shape=record label="STACK | | | | | | | | "] + + stack:f0 -> heap1:f0 [dir=none, style=dashed] + stack:f1 -> heap1:f1 [dir=none, style=dashed] + heap1:f1:n -> heap1:f2:n + heap1:f2:s -> heap1:f3:s + stack:f4 -> heap1:f4 [dir=none, style=dashed] + heap1:f5:s -> alloc:n [dir=back, style=dotted] + alloc [label = "free memory", shape=plaintext] +} +``` + + +In contrast, +reference counted heaps (e.g. below) tend to get fragmented, with free space scattered about, +and the allocated objects jumbled up in no particular order + + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | new2 | new3 | | | new1 | new4 | new5 | | | | | | | | | "] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f0 -> heap:f4 [dir=none, style=dashed] + stack:f1 -> heap:f0 [dir=none, style=dashed] + heap:f0:n -> heap:f1:n + heap:f1:s -> heap:f5:s + stack:f4 -> heap:f6 [dir=none, style=dashed] +} +``` + + +There are two main ways this affect performance: + +* With garbage collection all the free memory is always on the right in one contiguous block, + so an allocation just involves putting the object at the `free-pointer` location and moving + `free-pointer` one slot to the right. 
Furthermore, newly allocated objects (which tend to + be used together) are placed next to each other, making them more cache-friendly and + improving access performance + +* With reference counting objects are usually freed in-place, meaning that the free space is + scattered throughout the heap, and you may need to scan the entire heap from left-to-right + in order to find a spot to allocate something. There are data structures and algorithms that + can make allocation faster than a linear scan, but they will never be as fast as the single + pointer lookup necessary with a GC + +### Reference counting cannot collect cycles + +Objects that reference each other cyclically can thus +cause memory leaks when their objects never get collected, resulting in the program running +out of memory even though much of the heap could be cleaned up by a tracing garbage collector. + + +For example, consider the following heap, identical to the one we started with, but with an +additional edge from `bar` to `foo` (green), and with the edge from the stack to `bar` removed: + + +```graphviz +digraph G { + + node [shape=box width=0 height=0 style=filled fillcolor=white] + heap [shape=record label="HEAP | foo | bar | qux | baz | | | | | | | | | | | | "] + heap:f0:s -> heap:f1:s + heap:f1:n -> heap:f0:n [penwidth=3 color=green] + heap:f0:s -> heap:f2:s + heap:f2:n -> heap:f3:n [color=red, penwidth=3] + + stack [shape=record label="STACK | | | | | | | | "] + stack:f1 -> heap:f2 [dir=none, style=dashed, color=red, penwidth=3] +} +``` + +* With reference counting, even though `foo` and `bar` cannot be reached by any external reference - +they are "garbage" - each one still has a reference pointing at it from the other. 
Thus +they will never get collected + +* But with a tracing garbage collector, a collection can traverse the reference graph (red), +and copy `qux` and `baz` to the other half of the heap, leaving `foo` and `bar` behind as garbage, +despite the reference cycle between them + +_Garbage Collection_ and _Reference Counting_ have very different characteristics, and neither +is strictly superior to the other in all scenarios. +Many programming languages (e.g. Python) that use reference counting also have a backup +tracing garbage collector that runs once in a while to clean up unreachable reference cycles +and compact the heap, and most modern GCs (e.g. ZGC discussed below) use some reference-counting +techniques as part of their implementation. + + +## Theoretical GC Performance + +Typically, GC performance focuses on two main aspects: + +- *Overhead*: what % of the time your program is spent collecting garbage, rather than + real work. Lower is better +- *Pause Times*: what is the longest time your program is completely paused while + collecting garbage. Lower is better + + +These two metrics are separate: + +* *Some programs only care about throughput*, e.g. if you only care about how long a big batch + analysis takes to complete, and don't care if it pauses in the middle to GC: you just want + it to finish as soon as possible +* *Other programs only care about pause times*, e.g. someone playing a videogame doesn't care if + it can run faster than their eye can perceive, but they do care that it does not freeze or + pause for noticeable amounts of time while you are playing it + +Even from the limited description above, we can already make some interesting inferences +about how the performance of a simple garbage collector will be like. + +1. 
*Allocations in garbage collectors are _cheap_*: when the heap is not yet full, we can + just allocate things on the first empty slots on the right side of the heap and bump `free-pointer`, + without having to scan the heap to find empty slots. + +2. *Pause times should be proportional to the size of the live-set*. That is because + a collection involves tracing, copying, then updating the references within the live-set. + +3. *Pause times would _not_ depend on the amount of garbage to be collected*. The collection + we looked at above spend no time at all looking at or scanning for garbage objects, + they simply all disappeared when their half of the heap was wiped out following a collection. + +4. *Interval between collections is inversely proportional to free memory*. + We only need to run a collection when the garbage we allocate fills up the "extra" heap memory + our program has on top of what is necessary to store the live-set. + +5. *GC overhead is the pause time divided by the interval, or proportional + to the extra memory and inversely proportional to the live-set size and heap size* + +In other words: + +* `allocation_cost = O(1)` + +* `gc_pause_time = O(live-set)` + +* `gc_interval = O(heap-size - live-set)` + +* `gc_overhead = gc_pause_time / gc_interval` + +* `gc_overhead = O(live-set / (heap-size - live-set))` + +Even from this small conclusions, we can already see some unintuitive results: + +1. *More memory does _not_ reduce pause times!* `gc_pause_time = O(live-set)`, and so + pause times do not depend on how much `heap-size` you have. + +2. *There is no point at which providing more memory does not improve GC overhead!* + `gc_overhead = O(live-set / (heap-size - live-set))`, so + providing larger and larger ``heap-size``s means less and less GC overhead, meaning a + larger % of your program time is spent on useful work. + +3. 
*Conversely, providing _exactly as much memory as the program requires_ is the worst + case possible!* `gc_overhead = O(live-set / (heap-size - live-set))` when + `heap-size = live-set` means `gc_interval = 0` and `gc_overhead = infinity`: + the program will constantly need to run expensive collections and have no time left + to do actual work. Garbage collectors therefore _need_ excess memory to work with, on top + of the memory you would expect to need to allocate all the objects in your program. + +Even from this theoretical analysis, we have already found a number of surprising results +in how GCs perform over time. Let's now see how this applies to some real-world garbage +collectors included with the Java Virtual Machine. + +## Benchmarking JVM Garbage Collectors + +Now that we have run through a theoretical introduction and analysis of how GCs work +and how we would expect them to perform, let's look at some small Java programs and +monitor how garbage collection happens when using them. For this benchmark, we'll +be using the following Java program: + +- xref:attachment$GC.java[GC.java] + +This is a small Java program designed to do a rough benchmark of Java garbage collection +performance. For each benchmark, it: + +1. Starts off allocating a bunch of `int[]` arrays of varying size in `liveSet`, on + average taking up 1000 bytes each. + +2. Loops continuously to allocate more ``int[]``s and over-writes the references + to older ones + +3. Tracks how long each allocation takes to run: ideally it should be almost instant, but if + that allocation triggers a GC it may take some time + +4. Lastly, we print out the two numbers we care about + in a GC: the `maxPause` time in milliseconds, and the `throughput` it is able to handle + in megabytes per second (`throughput` being the opposite of `overhead` we mentioned earlier). + +To be clear, this benchmark is _rough_. 
Performance will vary between runs, and on what hardware +and software you run it (I ran it on an M1 MacBook Pro running Java 23). But the results should be +clear even if the exact numbers will differ between runs. + +You can run this program via: + +```bash +> java -Xmx1g GC.java 800 10000 5 # Default is -XX:+UseG1GC +> java -Xmx1g -XX:+UseParallelGC GC.java 800 10000 5 +> java -Xmx1g -XX:+UseZGC GC.java 800 10000 5 +``` + +Above, `-Xmx1g` sets the heap size, the `-XX:` flags set the garbage collector, `800` sets +the `liveSet` size (in megabytes), and `10000` and `5` set the duration and number of +iterations to run the benchmark (here 10 seconds, 5 iterations). The measured pause times +and allocation rate are averaged over those 5 iterations. + +I used the following Java program to run the benchmark for a +range of inputs to collect the numbers shown below: + +- xref:attachment$GCBenchmark.java[GCBenchmark.java] + +### G1 Garbage Collector Benchmarks + +Running this on the default GC (G1), we get the following numbers: + +*Pause Times* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 39 ms | 48 ms | 74 ms | 63 ms | 90 ms +| 800 mb | | 72 ms | 82 ms | 144 ms | 165 ms +| 1600 mb | | | 129 ms | 137 ms | 267 ms +| 3200 mb | | | | 248 ms | 307 ms +| 6400 mb | | | | | 624 ms +|=== + +*Throughput* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 3238 mb/s | 3938 mb/s | 5329 mb/s | 5198 mb/s | 5410 mb/s +| 800 mb | | 3180 mb/s | 3765 mb/s | 4602 mb/s | 4550 mb/s +| 1600 mb | | | 3046 mb/s | 3632 mb/s | 3777 mb/s +| 3200 mb | | | | 3000 mb/s | 3148 mb/s +| 6400 mb | | | | | 2618 mb/s +|=== + +As mentioned earlier, garbage collectors require some amount of free space in order to +work well, and so we only ran the benchmarks where the `heap-size` was twice or more +of the `live-set` size. 
+ +Above, we can see the behavior we discussed earlier in the theoretical performance analysis: + +1. *GC pause times go up as the size of the live set increases*. With a `800 mb` heap and + `400 mb` live set the average pause time is `39 ms`, and it scales smoothly up to a + `12800 mb` heap and `6400 mb` live set where the pause time is `624 ms` + +2. *GC pause times are relatively constant regardless of the heap size*. + e.g. for `400 mb` live set a `800 mb` heap has a `39 ms` pause time, while a `400 mb` live set and + `12800 mb` heap (16 times as large!) has a `90 ms` pause time. In fact, increasing the heap + size while keeping other things constant seems to make pause times go up slightly in + this benchmark! + +3. *GC throughput goes up as the heap size increases*, e.g. for a `400 mb` live set it goes + smoothly from `3238 mb/s` for a `800 mb` heap to a `5410 mb/s` throughput for a `12800 mb` + heap. + + +### Generational Optimizations + +One additional GC behavior worth discussing is the "Generational Hypothesis". +The idea is that _"most"_ objects do not live a long time, e.g. objects allocated within a method are often +collected when the method returns. Given that assumption, many GCs have made optimizations +for the collection of objects that become garbage quickly, such that collecting them is +much cheaper. Practically, that means that the same `live-set` and `heap-size` can have +vastly different performance depending on how the allocations are structured: + +1. "Least Recently Used" garbage collections, where the _oldest_ objects are the ones that + get collected, will perform the worst + +2. "Most Recently Used" garbage collections, where the _newest_ objects are the ones that + get collected, will perform the best + +Notably, "LRU" is one of the most common caching strategies, which means it is possible +for in-memory caches with LRU cache eviction to make GC problems worse! 
+ +The example Java benchmark above keeps objects around a while before they become garbage, +by assigning new allocations to random indices in the `liveSet` array. +We can instead always assign new allocations to indices via a +https://en.wikipedia.org/wiki/Random_walk[Random Walk]: +randomly adjacent to the left or right of the previously-assigned allocation, meaning +that recently allocated objects are likely to be over-written (becoming unreachable and +eligible for garbage collection) more quickly by newer allocations +in the same part of `liveSet`, while older objects in other parts of `liveSet` are less +likely to become unreachable. This lets us emulate the "generational" behavior that +is common in real-world programs: + +```diff +-liveSetIndex = random.nextInt(liveSetSize); ++liveSetIndex += (random.nextBoolean() ? 1 : -1) + liveSetSize; ++liveSetIndex %= liveSetSize; +``` + + +If we do this and measure the pause times and throughput of the example program, +we get the following: + +*Pause Times* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 4 ms | 3 ms | 2 ms | 4 ms | 2 ms +| 800 mb | | 3 ms | 3 ms | 12 ms | 3 ms +| 1600 mb | | | 5 ms | 10 ms | 4 ms +| 3200 mb | | | | 13 ms | 11 ms +| 6400 mb | | | | | 22 ms +|=== + +*Throughput* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 7218 mb/s | 7495 mb/s | 7536 mb/s | 7550 mb/s | 7634 mb/s +| 800 mb | | 7497 mb/s | 7790 mb/s | 7580 mb/s | 7819 mb/s +| 1600 mb | | | 7563 mb/s | 7464 mb/s | 7830 mb/s +| 3200 mb | | | | 7128 mb/s | 5854 mb/s +| 6400 mb | | | | | 3286 mb/s +|=== + + +Where the previous random-allocation benchmark has pause times of 10s to 100s to 1000s of +milliseconds, this "generational" benchmark has pause times in the 1s to 10s of milliseconds. The program +throughput is also significantly higher. 
+This demonstrates that the default G1 garbage collector does in fact have optimizations that +make it perform better for "generational" workloads. + +Most GCs have some kind of optimization to make collecting recently-allocated objects +cheaper than collecting long-lived objects; these are often called _generational_ +garbage collectors. Java's G1GC is no different, and we can see that even with +the same live-set size and heap sizes, shorter-lived objects are dramatically +cheaper to collect than long-lived objects. + + +### Z Garbage Collector Benchmarks + +One interesting development in JVM garbage collectors is the +https://docs.oracle.com/en/java/javase/21/gctuning/z-garbage-collector.html[Z Garbage Collector]. +This is a garbage collector that is optimized for lower pause times, in exchange for requiring +much more memory than the default G1GC. If we run the benchmarks above with ZGC, +even without the generational allocation pattern described above, we get the numbers below: + +*Pause Times* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 39 ms | 12 ms | 1 ms | 1 ms | 1 ms +| 800 mb | | 63 ms | 1 ms | 1 ms | 3 ms +| 1600 mb | | | 208 ms | 9 ms | 1 ms +| 3200 mb | | | | 378 ms | 2 ms +| 6400 mb | | | | | 701 ms +|=== + +*Throughput* +[%autowidth.stretch, cols=">,>,>,>,>,>"] +|=== +| live-set\heap-size | 800 mb | 1600 mb | 3200 mb | 6400 mb | 12800 mb +| 400 mb | 2428 mb/s | 4130 mb/s | 5139 mb/s | 5647 mb/s | 5943 mb/s +| 800 mb | | 2587 mb/s | 3920 mb/s | 4776 mb/s | 4975 mb/s +| 1600 mb | | | 2383 mb/s | 3513 mb/s | 4088 mb/s +| 3200 mb | | | | 2282 mb/s | 3186 mb/s +| 6400 mb | | | | | 2304 mb/s +|=== + +Some things worth noting with ZGC: + +1. In the lower `heap-size` benchmarks - with `heap-size` twice `live-set` - ZGC +has worse pause times than the default G1GC (10s to 100s of milliseconds) and +also worse throughput (`2300-2600 mb/s` rather than the `2800-3100 mb/s` of G1GC) + +2. 
For larger ``heap-size``s - 4 times the `live-set` and above - ZGC's pause times drop to +single-digit milliseconds (1-10 ms), much lower than those of G1GC + +As mentioned in the discussion on theoretical GC performance above, for +most garbage collectors pause times are proportional to the live set, and increasing the +heap size does not help at all (and according to our benchmarks above, may +even make things worse!). This can be problematic, because there are many use cases that +cannot tolerate long GC pause times, but at the same time may require a significant amount +of live data to be kept in memory, so shrinking the live-set is not possible. + +ZGC provides an option here, where if you are willing to provide _significantly_ +more memory than the default G1GC requires, perhaps twice as much, you can get your +pause times from 10-100s of milliseconds down to 1-2 milliseconds. These pause times +remain low for a wide range of heap sizes and live set sizes, and can be beneficial +for a lot of applications that cannot afford to just randomly stop for 100ms at a time. +But the extra memory requirement means it's not a strict improvement, and it really +depends on your use case whether the tradeoff is worth it. + + +## GC Performance Takeaways + + +Now that we've studied garbage collection in theory, and looked at some concrete +numbers, there are some interesting conclusions. First, the unintuitive things: + +1. *Adding more memory does _not improve_ GC pause times*. It may even make things worse! + This is perhaps the most unintuitive thing about garbage collectors: it seems so + obvious that problems with memory management would be solved by adding more memory, + but we can see from our theoretical analysis above why that is not the case, + and we verified that empirically in benchmarks. + +2. 
*Caching data _in-process_ can make garbage collection pause times _worse_!* If + you have problems with GC pause times then caching things in-memory will + increase the size of your _live-set_ and therefore make your pause times even worse! + "LRU" caches in particular are the worst case for garbage collectors, which are typically + optimized for collecting recently-allocated short-lived objects. + In contrast, caching things _out of process_ + does not have this problem. Caching can be worthwhile to reduce redundant computation, + but it is not a solution to garbage collection problems. + +3. *There will never be an _exact_ amount of memory that a garbage-collected application + needs.* You can _always_ reduce-overhead/increase-throughput by providing more memory, + to make GCs less and less frequent, leaving more time to do useful work. And you can + usually provide less memory, at the cost of more and more frequent GCs. Exactly how much + memory to provide is thus something you tweak and tune rather than something you can + calculate exactly. + +4. *Fewer larger processes can have worse GC performance than more smaller processes!* + There are many ways in which consolidating smaller processes into larger ones can + improve efficiency: less per-process overhead, eliminating + https://en.wikipedia.org/wiki/Inter-process_communication[inter-process communication] cost, + etc. But GC pause times scale with _total live set size_, so combining two smaller + processes into one large one can make pause times _worse_ than they were before. + Even if the large process does the same thing as the smaller processes, it can + suffer from worse GC pause times. + +5. *You can reduce pause times by reducing the _live-set_*. If you have very large + in-process data structures, moving them somewhere else (e.g. 
into + https://www.sqlite.org/[SQLite], https://github.com/redis/redis[Redis], + or https://memcached.org/[Memcached]) would reduce the number of objects the GC + needs to trace and copy every collection, and reduce the pause times + +6. *Shorter-lived objects are faster to collect*, due to most GCs being _generational_. This + also ties into (2) above: caches tend to keep lots of long-lived objects in memory, which + apart from slowing down collections due to the size of the live-set, _also_ slows them down + by missing out on the GC's optimizations for short-lived objects. + +7. *Switching to the Z garbage collector lets you trade off memory for pause times.* + JVM programs are by default already very memory hungry compared to other languages + (Go, Rust, etc.) and ZGC requires perhaps another 2x as much memory to work. But + if you are willing to pay the cost, ZGC can bring pause times down from 50-500ms + down to 1-5ms, which may make a big difference for latency-sensitive applications. + + +The Java benchmarks above were run on one particular set of hardware on one version +of the JVM, and the exact numbers will differ when run on other hardware or JVM versions. +Nevertheless, the overall trends that you can see would remain the same, as would the +take-aways of what you need to know to understand garbage collector performance. + + +## Conclusion + +Garbage collectors can be complicated, differing in design +and implementation between languages (Python, Java, Go, etc.) and even within the same +language (Java's https://docs.oracle.com/en/java/javase/11/gctuning/parallel-collector1.html[ParallelGC], +https://docs.oracle.com/en/java/javase/17/gctuning/garbage-first-g1-garbage-collector1.html[G1GC], +the newer https://docs.oracle.com/en/java/javase/21/gctuning/z-garbage-collector.html[ZGC], etc.). +There are endless clever optimizations for the language designers to implement and knobs +for language users to tweak and tune. 
+ +However, at a high level most GCs are actually surprisingly similar, have the same +odd performance characteristics, and the same surprising pitfalls. +Hopefully this article will have given you a good intuition for how garbage collectors work and behave, so +next time you need to do something with your GC you have a solid understanding to work with. diff --git a/blog/modules/ROOT/pages/index.adoc b/blog/modules/ROOT/pages/index.adoc new file mode 100644 index 00000000000..3ccfd313c6f --- /dev/null +++ b/blog/modules/ROOT/pages/index.adoc @@ -0,0 +1,34 @@ +# The Mill Build Engineering Blog + +include::mill:ROOT:partial$gtag-config.adoc[] + + +Welcome to the Mill build engineering blog! This is the home for articles on +technical topics related to JVM platform tooling and language-agnostic build tooling, +some specific to the Mill build tool but mostly applicable to anyone working on +build tooling for large codebases in JVM and non-JVM languages. + +include::6-garbage-collector-perf.adoc[tag=header,leveloffset=1] + +xref:6-garbage-collector-perf.adoc[Read More...] + +include::5-executable-jars.adoc[tag=header,leveloffset=1] + +xref:5-executable-jars.adoc[Read More...] + +include::4-flaky-tests.adoc[tag=header,leveloffset=1] + +xref:4-flaky-tests.adoc[Read More...] + +include::3-selective-testing.adoc[tag=header,leveloffset=1] + +xref:3-selective-testing.adoc[Read More...] + +include::2-monorepo-build-tool.adoc[tag=header,leveloffset=1] + +xref:2-monorepo-build-tool.adoc[Read More...] + +include::1-java-compile.adoc[tag=header,leveloffset=1] + +xref:1-java-compile.adoc[Read More...] 
+ diff --git a/bsp/package.mill b/bsp/package.mill index 955ed3da0f7..f19e2eae0c9 100644 --- a/bsp/package.mill +++ b/bsp/package.mill @@ -4,13 +4,12 @@ import mill._ import mill.contrib.buildinfo.BuildInfo import mill.T - object `package` extends RootModule with build.MillPublishScalaModule with BuildInfo { def compileModuleDeps = Seq(build.scalalib) def testModuleDeps = super.testModuleDeps ++ compileModuleDeps def buildInfoPackageName = "mill.bsp" - def buildInfoMembers = T { + def buildInfoMembers = Task { val workerDep = worker.publishSelfDependency() Seq( BuildInfo.Value( @@ -23,7 +22,7 @@ object `package` extends RootModule with build.MillPublishScalaModule with Build override lazy val test: MillScalaTests = new Test {} trait Test extends MillScalaTests { - def forkEnv: T[Map[String, String]] = T { + def forkEnv: T[Map[String, String]] = Task { // We try to fetch this dependency with coursier in the tests worker.publishLocalCached() super.forkEnv() @@ -33,7 +32,12 @@ object `package` extends RootModule with build.MillPublishScalaModule with Build } object worker extends build.MillPublishScalaModule { - def compileModuleDeps = Seq(build.bsp, build.scalalib, build.testrunner, build.runner) ++ build.scalalib.compileModuleDeps + def compileModuleDeps = Seq( + build.bsp, + build.scalalib, + build.testrunner, + build.runner + ) ++ build.scalalib.compileModuleDeps def ivyDeps = Agg(build.Deps.bsp4j, build.Deps.sbtTestInterface) } } diff --git a/bsp/src/mill/bsp/BSP.scala b/bsp/src/mill/bsp/BSP.scala index dfa143fe147..9c93cc0b472 100644 --- a/bsp/src/mill/bsp/BSP.scala +++ b/bsp/src/mill/bsp/BSP.scala @@ -1,7 +1,7 @@ package mill.bsp import mill.api.{Ctx, PathRef} -import mill.{Agg, T} +import mill.{Agg, T, Task} import mill.define.{Command, Discover, ExternalModule} import mill.main.BuildInfo import mill.eval.Evaluator @@ -12,7 +12,7 @@ object BSP extends ExternalModule with CoursierModule { lazy val millDiscover: Discover = Discover[this.type] - private def 
bspWorkerLibs: T[Agg[PathRef]] = T { + private def bspWorkerLibs: T[Agg[PathRef]] = Task { millProjectModule("mill-bsp-worker", repositoriesTask()) } @@ -30,7 +30,7 @@ object BSP extends ExternalModule with CoursierModule { * reason, the message and stacktrace of the exception will be * printed to stdout. */ - def install(jobs: Int = 1): Command[(PathRef, ujson.Value)] = T.command { + def install(jobs: Int = 1): Command[(PathRef, ujson.Value)] = Task.Command { // we create a file containing the additional jars to load val libUrls = bspWorkerLibs().map(_.path.toNIO.toUri.toURL).iterator.toSeq val cpFile = @@ -46,11 +46,11 @@ object BSP extends ExternalModule with CoursierModule { /** * This command only starts a BSP session, which means it injects the current evaluator into an already running BSP server. * This command requires Mill to start with `--bsp` option. - * @param ev The Evaluator + * @param allBootstrapEvaluators The Evaluator * @return The server result, indicating if mill should re-run this command or just exit. 
*/ def startSession(allBootstrapEvaluators: Evaluator.AllBootstrapEvaluators) - : Command[BspServerResult] = T.command { + : Command[BspServerResult] = Task.Command { T.log.errorStream.println("BSP/startSession: Starting BSP session") val res = BspContext.bspServerHandle.runSession(allBootstrapEvaluators.value) T.log.errorStream.println(s"BSP/startSession: Finished BSP session, result: ${res}") @@ -75,11 +75,10 @@ object BSP extends ExternalModule with CoursierModule { } private def bspConnectionJson(jobs: Int, debug: Boolean): String = { - val props = sys.props - val millPath = props - .get("mill.main.cli") + val millPath = sys.env.get("MILL_MAIN_CLI") + .orElse(sys.props.get("mill.main.cli")) // we assume, the classpath is an executable jar here - .orElse(props.get("java.class.path")) + .orElse(sys.props.get("java.class.path")) .getOrElse(throw new IllegalStateException("System property 'java.class.path' not set")) upickle.default.write( diff --git a/bsp/src/mill/bsp/BspContext.scala b/bsp/src/mill/bsp/BspContext.scala index b93ce2d187c..4818c3fbab4 100644 --- a/bsp/src/mill/bsp/BspContext.scala +++ b/bsp/src/mill/bsp/BspContext.scala @@ -55,6 +55,7 @@ private[mill] class BspContext( override def info(s: String): Unit = streams.err.println(s) override def error(s: String): Unit = streams.err.println(s) override def ticker(s: String): Unit = streams.err.println(s) + override def setPromptDetail(key: Seq[String], s: String): Unit = streams.err.println(s) override def debug(s: String): Unit = streams.err.println(s) override def debugEnabled: Boolean = true diff --git a/bsp/src/mill/bsp/BspServerHandle.scala b/bsp/src/mill/bsp/BspServerHandle.scala index b818bb22766..b24d2f7237d 100644 --- a/bsp/src/mill/bsp/BspServerHandle.scala +++ b/bsp/src/mill/bsp/BspServerHandle.scala @@ -9,7 +9,7 @@ trait BspServerHandle { /** * Runs a new session with the given evaluator. This one blocks until the session ends. 
- * @return The reason which the session ended, possibly indictating the wish for restart (e.g. in case of workspace reload). + * @return The reason which the session ended, possibly indicating the wish for restart (e.g. in case of workspace reload). */ def runSession(evaluators: Seq[Evaluator]): BspServerResult diff --git a/bsp/src/mill/bsp/BspWorker.scala b/bsp/src/mill/bsp/BspWorker.scala index f24a3b6c3a9..2675783790c 100644 --- a/bsp/src/mill/bsp/BspWorker.scala +++ b/bsp/src/mill/bsp/BspWorker.scala @@ -34,8 +34,8 @@ private object BspWorker { urls }.getOrElse { // load extra classpath entries from file - val cpFile = - workspace / Constants.bspDir / s"${Constants.serverName}-${mill.main.BuildInfo.millVersion}.resources" + val resources = s"${Constants.serverName}-${mill.main.BuildInfo.millVersion}.resources" + val cpFile = workspace / Constants.bspDir / resources if (!os.exists(cpFile)) return Left( "You need to run `mill mill.bsp.BSP/install` before you can use the BSP server" ) diff --git a/bsp/src/mill/bsp/Constants.scala b/bsp/src/mill/bsp/Constants.scala index 3bc12cc3577..735b27f3d61 100644 --- a/bsp/src/mill/bsp/Constants.scala +++ b/bsp/src/mill/bsp/Constants.scala @@ -6,6 +6,6 @@ private[mill] object Constants { val bspProtocolVersion = BuildInfo.bsp4jVersion val bspWorkerImplClass = "mill.bsp.worker.BspWorkerImpl" val bspWorkerBuildInfoClass = "mill.bsp.worker.BuildInfo" - val languages: Seq[String] = Seq("scala", "java") + val languages: Seq[String] = Seq("java", "scala", "kotlin") val serverName = "mill-bsp" } diff --git a/bsp/worker/src/mill/bsp/worker/BspCompileProblemReporter.scala b/bsp/worker/src/mill/bsp/worker/BspCompileProblemReporter.scala index b97ec0af4ed..041335a90b4 100644 --- a/bsp/worker/src/mill/bsp/worker/BspCompileProblemReporter.scala +++ b/bsp/worker/src/mill/bsp/worker/BspCompileProblemReporter.scala @@ -154,6 +154,18 @@ private class BspCompileProblemReporter( client.onBuildTaskStart(taskStartParams) } + override def 
notifyProgress(percentage: Long, total: Long): Unit = { + val params = new TaskProgressParams(taskId).tap { it => + it.setEventTime(System.currentTimeMillis()) + it.setData(new CompileTask(targetId)) + it.setDataKind("compile-progress") + it.setMessage(s"Compiling target ${targetDisplayName} ($percentage%)") + it.setProgress(percentage) + it.setTotal(total) + } + client.onBuildTaskProgress(params) + } + override def finish(): Unit = { val taskFinishParams = new TaskFinishParams(taskId, if (errors > 0) StatusCode.ERROR else StatusCode.OK).tap { it => diff --git a/bsp/worker/src/mill/bsp/worker/BspTestReporter.scala b/bsp/worker/src/mill/bsp/worker/BspTestReporter.scala index 746886b2121..c5a0e8f7c8d 100644 --- a/bsp/worker/src/mill/bsp/worker/BspTestReporter.scala +++ b/bsp/worker/src/mill/bsp/worker/BspTestReporter.scala @@ -28,7 +28,7 @@ import java.io.{PrintWriter, StringWriter} /** * Context class for BSP, specialized for sending `task-start` and - * `task-finish` notifications for every test being ran. + * `task-finish` notifications for every test being run. 
* * @param client The client to send notifications to * @param targetId The targetId of the BSP target for which diff --git a/bsp/worker/src/mill/bsp/worker/MillBspLogger.scala b/bsp/worker/src/mill/bsp/worker/MillBspLogger.scala index 7f386b4dc37..59c4104621c 100644 --- a/bsp/worker/src/mill/bsp/worker/MillBspLogger.scala +++ b/bsp/worker/src/mill/bsp/worker/MillBspLogger.scala @@ -20,8 +20,8 @@ import mill.util.{ColorLogger, ProxyLogger} private class MillBspLogger(client: BuildClient, taskId: Int, logger: Logger) extends ProxyLogger(logger) with ColorLogger { - def infoColor = fansi.Color.Blue - def errorColor = fansi.Color.Red + override def infoColor = fansi.Color.Blue + override def errorColor = fansi.Color.Red override def ticker(s: String): Unit = { try { diff --git a/bsp/worker/src/mill/bsp/worker/MillBuildServer.scala b/bsp/worker/src/mill/bsp/worker/MillBuildServer.scala index 0d846b33d85..a062ea06fe8 100644 --- a/bsp/worker/src/mill/bsp/worker/MillBuildServer.scala +++ b/bsp/worker/src/mill/bsp/worker/MillBuildServer.scala @@ -3,18 +3,19 @@ package mill.bsp.worker import ch.epfl.scala.bsp4j import ch.epfl.scala.bsp4j._ import com.google.gson.JsonObject -import mill.T -import mill.api.{DummyTestReporter, Result, Strict} -import mill.bsp.BspServerResult +import mill.api.Loose.Agg +import mill.api.{CompileProblemReporter, DummyTestReporter, Result, Strict, TestReporter} +import mill.bsp.{BspServerResult, Constants} import mill.bsp.worker.Utils.{makeBuildTarget, outputPaths, sanitizeUri} import mill.define.Segment.Label -import mill.define.{Args, Discover, ExternalModule, Task} +import mill.define.{Args, Discover, ExternalModule, NamedTask, Task} import mill.eval.Evaluator import mill.eval.Evaluator.TaskResult import mill.main.MainModule import mill.runner.MillBuildRootModule import mill.scalalib.bsp.{BspModule, JvmBuildTarget, ScalaBuildTarget} import mill.scalalib.{JavaModule, SemanticDbJavaModule, TestModule} +import mill.util.ColorLogger import 
java.io.PrintStream import java.util.concurrent.CompletableFuture @@ -34,6 +35,8 @@ private class MillBuildServer( ) extends ExternalModule with BuildServer { + import MillBuildServer._ + lazy val millDiscover: Discover = Discover[this.type] private[worker] var cancellator: Boolean => Unit = shutdownBefore => () @@ -44,7 +47,7 @@ private class MillBuildServer( protected var clientWantsSemanticDb = false protected var clientIsIntelliJ = false - /** `true` when client and server support the `JvmCompileClasspathProvider`` request. */ + /** `true` when client and server support the `JvmCompileClasspathProvider` request. */ protected var enableJvmCompileClasspathProvider = false private[this] var statePromise: Promise[State] = Promise[State]() @@ -72,7 +75,7 @@ private class MillBuildServer( // TODO: scan BspModules and infer their capabilities - val supportedLangs = Seq("java", "scala").asJava + val supportedLangs = Constants.languages.asJava val capabilities = new BuildServerCapabilities capabilities.setBuildTargetChangedProvider(false) @@ -155,7 +158,7 @@ private class MillBuildServer( override def workspaceBuildTargets(): CompletableFuture[WorkspaceBuildTargetsResult] = completableTasksWithState( "workspaceBuildTargets", - targetIds = _.bspModulesById.keySet.toSeq, + targetIds = _.bspModulesIdList.map(_._1), tasks = { case m: BspModule => m.bspBuildTargetData } ) { (ev, state, id, m: BspModule, bspBuildTargetData) => val depsIds = m match { @@ -174,14 +177,12 @@ private class MillBuildServer( bsp4j.ScalaPlatform.forValue(d.platform.number), d.jars.asJava ) + for (jvmBuildTarget <- d.jvmBuildTarget) + target.setJvmBuildTarget(MillBuildServer.jvmBuildTarget(jvmBuildTarget)) Some((dataKind, target)) case Some((dataKind, d: JvmBuildTarget)) => - val target = new bsp4j.JvmBuildTarget().tap { it => - d.javaHome.foreach(jh => it.setJavaHome(jh.uri)) - d.javaVersion.foreach(jv => it.setJavaVersion(jv)) - } - Some((dataKind, target)) + Some((dataKind, jvmBuildTarget(d))) 
case Some((dataKind, d)) => debug(s"Unsupported dataKind=${dataKind} with value=${d}") @@ -230,13 +231,12 @@ private class MillBuildServer( targetIds = _ => sourcesParams.getTargets.asScala.toSeq, tasks = { case module: MillBuildRootModule => - T.task { - module.scriptSources().map(p => sourceItem(p.path, false)) ++ - module.sources().map(p => sourceItem(p.path, false)) ++ + Task.Anon { + module.sources().map(p => sourceItem(p.path, false)) ++ module.generatedSources().map(p => sourceItem(p.path, true)) } case module: JavaModule => - T.task { + Task.Anon { module.sources().map(p => sourceItem(p.path, false)) ++ module.generatedSources().map(p => sourceItem(p.path, true)) } @@ -254,9 +254,9 @@ private class MillBuildServer( override def buildTargetInverseSources(p: InverseSourcesParams) : CompletableFuture[InverseSourcesResult] = { completable(s"buildtargetInverseSources ${p}") { state => - val tasksEvaluators = state.bspModulesById.iterator.collect { + val tasksEvaluators = state.bspModulesIdList.iterator.collect { case (id, (m: JavaModule, ev)) => - T.task { + Task.Anon { val src = m.allSourceFiles() val found = src.map(sanitizeUri).contains( p.getTextDocument.getUri @@ -265,11 +265,9 @@ private class MillBuildServer( } -> ev }.toSeq - val ids = tasksEvaluators - .groupMap(_._2)(_._1) + val ids = groupList(tasksEvaluators)(_._2)(_._1) .flatMap { case (ev, ts) => ev.evalOrThrow()(ts) } .flatten - .toSeq new InverseSourcesResult(ids.asJava) } @@ -295,7 +293,7 @@ private class MillBuildServer( targetIds = _ => p.getTargets.asScala.toSeq, tasks = { case m: JavaModule => - T.task { + Task.Anon { ( m.defaultResolver().resolveDeps( m.transitiveCompileIvyDeps() ++ m.transitiveIvyDeps(), @@ -334,7 +332,7 @@ private class MillBuildServer( hint = "buildTargetDependencyModules", targetIds = _ => params.getTargets.asScala.toSeq, tasks = { case m: JavaModule => - T.task { (m.transitiveCompileIvyDeps(), m.transitiveIvyDeps(), m.unmanagedClasspath()) } + Task.Anon { 
(m.transitiveCompileIvyDeps(), m.transitiveIvyDeps(), m.unmanagedClasspath()) } } ) { case ( @@ -351,10 +349,10 @@ private class MillBuildServer( new DependencyModule(dep.dep.module.repr, dep.dep.version) } - val unmanged = unmanagedClasspath.map { dep => + val unmanaged = unmanagedClasspath.map { dep => new DependencyModule(s"unmanaged-${dep.path.last}", "") } - new DependencyModulesItem(id, (deps ++ unmanged).iterator.toSeq.asJava) + new DependencyModulesItem(id, (deps ++ unmanaged).iterator.toSeq.asJava) } { new DependencyModulesResult(_) } @@ -364,8 +362,8 @@ private class MillBuildServer( s"buildTargetResources ${p}", targetIds = _ => p.getTargets.asScala.toSeq, tasks = { - case m: JavaModule => T.task { m.resources() } - case _ => T.task { Nil } + case m: JavaModule => Task.Anon { m.resources() } + case _ => Task.Anon { Nil } } ) { case (ev, state, id, m, resources) => @@ -387,7 +385,7 @@ private class MillBuildServer( case (m: SemanticDbJavaModule, ev) if clientWantsSemanticDb => (m.compiledClassesAndSemanticDbFiles, ev) case (m: JavaModule, ev) => (m.compile, ev) - case (m, ev) => T.task { + case (m, ev) => Task.Anon { Result.Failure( s"Don't know how to compile non-Java target ${m.bspBuildTarget.displayName}" ) @@ -397,7 +395,8 @@ private class MillBuildServer( val result = compileTasksEvs .groupMap(_._2)(_._1) .map { case (ev, ts) => - ev.evaluate( + evaluate( + ev, ts, Utils.getBspLoggedReporterPool(p.getOriginId, state.bspIdByModule, client), DummyTestReporter, @@ -446,8 +445,9 @@ private class MillBuildServer( }.get val args = params.getArguments.getOrElse(Seq.empty[String]) - val runTask = module.run(T.task(Args(args))) - val runResult = ev.evaluate( + val runTask = module.run(Task.Anon(Args(args))) + val runResult = evaluate( + ev, Strict.Agg(runTask), Utils.getBspLoggedReporterPool(runParams.getOriginId, state.bspIdByModule, client), logger = new MillBspLogger(client, runTask.hashCode(), ev.baseLogger) @@ -512,7 +512,8 @@ private class 
MillBuildServer( Seq.empty[String] ) - val results = ev.evaluate( + val results = evaluate( + ev, Strict.Agg(testTask), Utils.getBspLoggedReporterPool( testParams.getOriginId, @@ -572,7 +573,8 @@ private class MillBuildServer( val compileTargetName = (module.millModuleSegments ++ Label("compile")).render debug(s"about to clean: ${compileTargetName}") val cleanTask = mainModule.clean(ev, Seq(compileTargetName): _*) - val cleanResult = ev.evaluate( + val cleanResult = evaluate( + ev, Strict.Agg(cleanTask), logger = new MillBspLogger(client, cleanTask.hashCode, ev.baseLogger) ) @@ -636,18 +638,17 @@ private class MillBuildServer( State ) => V): CompletableFuture[V] = { val prefix = hint.split(" ").head - completable(hint) { state: State => + completable(hint) { (state: State) => val ids = state.filterNonSynthetic(targetIds(state).asJava).asScala val tasksSeq = ids.flatMap { id => val (m, ev) = state.bspModulesById(id) tasks.lift.apply(m).map(ts => (ts, (ev, id))) } - val evaluated = tasksSeq - // group by evaluator (different root module) - .groupMap(_._2)(_._1) + // group by evaluator (different root module) + val evaluated = groupList(tasksSeq.toSeq)(_._2)(_._1) .map { case ((ev, id), ts) => - val results = ev.evaluate(ts) + val results = evaluate(ev, ts) val failures = results.results.collect { case (_, TaskResult(res: Result.Failing[_], _)) => res } @@ -676,7 +677,7 @@ private class MillBuildServer( } } - agg(evaluated.flatten.toSeq.asJava, state) + agg(evaluated.flatten.asJava, state) } } @@ -773,4 +774,52 @@ private class MillBuildServer( override def onRunReadStdin(params: ReadParams): Unit = { debug("onRunReadStdin is current unsupported") } + + private def evaluate( + evaluator: Evaluator, + goals: Agg[Task[_]], + reporter: Int => Option[CompileProblemReporter] = _ => Option.empty[CompileProblemReporter], + testReporter: TestReporter = DummyTestReporter, + logger: ColorLogger = null + ): Evaluator.Results = { + val logger0 = 
Option(logger).getOrElse(evaluator.baseLogger) + mill.runner.MillMain.withOutLock( + noBuildLock = false, + noWaitForBuildLock = false, + out = evaluator.outPath, + targetsAndParams = goals.toSeq.map { + case n: NamedTask[_] => n.label + case t => t.toString + }, + streams = logger0.systemStreams + ) { + evaluator.evaluate( + goals, + reporter, + testReporter, + logger0, + serialCommandExec = false + ) + } + } +} + +private object MillBuildServer { + + /** + * Same as Iterable.groupMap, but returns a sequence instead of a map, and preserves + * the order of appearance of the keys from the input sequence + */ + private def groupList[A, K, B](seq: Seq[A])(key: A => K)(f: A => B): Seq[(K, Seq[B])] = { + val keyIndices = seq.map(key).distinct.zipWithIndex.toMap + seq.groupMap(key)(f) + .toSeq + .sortBy { case (k, _) => keyIndices(k) } + } + + def jvmBuildTarget(d: JvmBuildTarget): bsp4j.JvmBuildTarget = + new bsp4j.JvmBuildTarget().tap { it => + d.javaHome.foreach(jh => it.setJavaHome(jh.uri)) + d.javaVersion.foreach(jv => it.setJavaVersion(jv)) + } } diff --git a/bsp/worker/src/mill/bsp/worker/MillJavaBuildServer.scala b/bsp/worker/src/mill/bsp/worker/MillJavaBuildServer.scala index e7b00993267..f7b88ea037f 100644 --- a/bsp/worker/src/mill/bsp/worker/MillJavaBuildServer.scala +++ b/bsp/worker/src/mill/bsp/worker/MillJavaBuildServer.scala @@ -6,7 +6,7 @@ import ch.epfl.scala.bsp4j.{ JavacOptionsParams, JavacOptionsResult } -import mill.T +import mill.Task import mill.bsp.worker.Utils.sanitizeUri import mill.scalalib.{JavaModule, SemanticDbJavaModule} @@ -26,7 +26,7 @@ private trait MillJavaBuildServer extends JavaBuildServer { this: MillBuildServe sem.bspCompiledClassesAndSemanticDbFiles case _ => m.bspCompileClassesPath } - T.task { + Task.Anon { ( classesPathTask(), m.javacOptions() ++ m.mandatoryJavacOptions(), diff --git a/bsp/worker/src/mill/bsp/worker/MillJvmBuildServer.scala b/bsp/worker/src/mill/bsp/worker/MillJvmBuildServer.scala index 0d3e67f87ff..d338c5b7f09 
100644 --- a/bsp/worker/src/mill/bsp/worker/MillJvmBuildServer.scala +++ b/bsp/worker/src/mill/bsp/worker/MillJvmBuildServer.scala @@ -13,7 +13,7 @@ import ch.epfl.scala.bsp4j.{ JvmTestEnvironmentParams, JvmTestEnvironmentResult } -import mill.T +import mill.Task import mill.bsp.worker.Utils.sanitizeUri import mill.scalalib.api.CompilationResult import mill.scalalib.{JavaModule, TestModule} @@ -55,7 +55,7 @@ private trait MillJvmBuildServer extends JvmBuildServer { this: MillBuildServer case m: TestModule => m.getTestEnvironmentVars() case _ => m.compile } - T.task { + Task.Anon { ( m.runClasspath(), m.forkArgs(), diff --git a/bsp/worker/src/mill/bsp/worker/MillScalaBuildServer.scala b/bsp/worker/src/mill/bsp/worker/MillScalaBuildServer.scala index a34a6ab037a..08dded47156 100644 --- a/bsp/worker/src/mill/bsp/worker/MillScalaBuildServer.scala +++ b/bsp/worker/src/mill/bsp/worker/MillScalaBuildServer.scala @@ -13,7 +13,7 @@ import ch.epfl.scala.bsp4j.{ ScalacOptionsParams, ScalacOptionsResult } -import mill.{Agg, T} +import mill.{Agg, Task} import mill.bsp.worker.Utils.sanitizeUri import mill.util.Jvm import mill.scalalib.{JavaModule, ScalaModule, TestModule, UnresolvedPath} @@ -35,13 +35,13 @@ private trait MillScalaBuildServer extends ScalaBuildServer { this: MillBuildSer case m: JavaModule => val scalacOptionsTask = m match { case m: ScalaModule => m.allScalacOptions - case _ => T.task { Seq.empty[String] } + case _ => Task.Anon { Seq.empty[String] } } val compileClasspathTask = if (enableJvmCompileClasspathProvider) { // We have a dedicated request for it - T.task { Agg.empty[UnresolvedPath] } + Task.Anon { Agg.empty[UnresolvedPath] } } else { m.bspCompileClasspath } @@ -53,7 +53,7 @@ private trait MillScalaBuildServer extends ScalaBuildServer { this: MillBuildSer m.bspCompileClassesPath } - T.task { + Task.Anon { (scalacOptionsTask(), compileClasspathTask(), classesPathTask()) } } @@ -85,7 +85,7 @@ private trait MillScalaBuildServer extends ScalaBuildServer { 
this: MillBuildSer hint = "buildTarget/scalaMainClasses", targetIds = _ => p.getTargets.asScala.toSeq, tasks = { case m: JavaModule => - T.task((m.zincWorker().worker(), m.compile(), m.forkArgs(), m.forkEnv())) + Task.Anon((m.zincWorker().worker(), m.compile(), m.forkArgs(), m.forkEnv())) } ) { case (ev, state, id, m: JavaModule, (worker, compile, forkArgs, forkEnv)) => @@ -112,9 +112,9 @@ private trait MillScalaBuildServer extends ScalaBuildServer { this: MillBuildSer targetIds = _ => p.getTargets.asScala.toSeq, tasks = { case m: TestModule => - T.task(Some((m.runClasspath(), m.testFramework(), m.testClasspath()))) + Task.Anon(Some((m.runClasspath(), m.testFramework(), m.testClasspath()))) case _ => - T.task(None) + Task.Anon(None) } ) { case (ev, state, id, m: TestModule, Some((classpath, testFramework, testClasspath))) => diff --git a/bsp/worker/src/mill/bsp/worker/State.scala b/bsp/worker/src/mill/bsp/worker/State.scala index e4e2a8b8b22..9db3d21a936 100644 --- a/bsp/worker/src/mill/bsp/worker/State.scala +++ b/bsp/worker/src/mill/bsp/worker/State.scala @@ -7,13 +7,13 @@ import mill.define.Module import mill.eval.Evaluator private class State(workspaceDir: os.Path, evaluators: Seq[Evaluator], debug: String => Unit) { - lazy val bspModulesById: Map[BuildTargetIdentifier, (BspModule, Evaluator)] = { + lazy val bspModulesIdList: Seq[(BuildTargetIdentifier, (BspModule, Evaluator))] = { val modules: Seq[(Module, Seq[Module], Evaluator)] = evaluators .map(ev => (ev.rootModule, JavaModuleUtils.transitiveModules(ev.rootModule), ev)) - val map = modules - .flatMap { case (rootModule, otherModules, eval) => - (Seq(rootModule) ++ otherModules).collect { + modules + .flatMap { case (rootModule, modules, eval) => + modules.collect { case m: BspModule => val uri = Utils.sanitizeUri( rootModule.millSourcePath / @@ -24,9 +24,10 @@ private class State(workspaceDir: os.Path, evaluators: Seq[Evaluator], debug: St (new BuildTargetIdentifier(uri), (m, eval)) } } - .toMap + } + lazy 
val bspModulesById: Map[BuildTargetIdentifier, (BspModule, Evaluator)] = { + val map = bspModulesIdList.toMap debug(s"BspModules: ${map.view.mapValues(_._1.bspDisplayName).toMap}") - map } diff --git a/bsp/worker/src/mill/bsp/worker/SyntheticRootBspBuildTargetData.scala b/bsp/worker/src/mill/bsp/worker/SyntheticRootBspBuildTargetData.scala index 45302e0ce97..c1891628f92 100644 --- a/bsp/worker/src/mill/bsp/worker/SyntheticRootBspBuildTargetData.scala +++ b/bsp/worker/src/mill/bsp/worker/SyntheticRootBspBuildTargetData.scala @@ -5,7 +5,6 @@ import mill.bsp.worker.Utils.{makeBuildTarget, sanitizeUri} import mill.scalalib.bsp.{BspBuildTarget, BspModule} import mill.scalalib.bsp.BspModule.Tag -import java.util.UUID import scala.jdk.CollectionConverters._ import ch.epfl.scala.bsp4j.BuildTarget @@ -15,11 +14,11 @@ import ch.epfl.scala.bsp4j.BuildTarget */ class SyntheticRootBspBuildTargetData(topLevelProjectRoot: os.Path) { val id: BuildTargetIdentifier = new BuildTargetIdentifier( - Utils.sanitizeUri(topLevelProjectRoot / s"synth-build-target-${UUID.randomUUID()}") + Utils.sanitizeUri(topLevelProjectRoot / "mill-synthetic-root-target") ) val bt: BspBuildTarget = BspBuildTarget( - displayName = Some(topLevelProjectRoot.last + "-root"), + displayName = Some("mill-synthetic-root"), baseDirectory = Some(topLevelProjectRoot), tags = Seq(Tag.Manual), languageIds = Seq.empty, diff --git a/bsp/worker/src/mill/bsp/worker/Utils.scala b/bsp/worker/src/mill/bsp/worker/Utils.scala index 5fe3f57b190..7b96c76a88e 100644 --- a/bsp/worker/src/mill/bsp/worker/Utils.scala +++ b/bsp/worker/src/mill/bsp/worker/Utils.scala @@ -34,7 +34,7 @@ private object Utils { originId: String, bspIdsByModule: Map[BspModule, BuildTargetIdentifier], client: BuildClient - ): Int => Option[CompileProblemReporter] = { moduleHashCode: Int => + ): Int => Option[CompileProblemReporter] = { (moduleHashCode: Int) => bspIdsByModule.find(_._1.hashCode == moduleHashCode).map { case (module: JavaModule, targetId) => 
val buildTarget = module.bspBuildTarget diff --git a/build.mill b/build.mill index 0387e6a0792..92294b6f037 100644 --- a/build.mill +++ b/build.mill @@ -5,13 +5,11 @@ import coursier.maven.MavenRepository import de.tobiasroeser.mill.vcs.version.VcsVersion import com.goyeau.mill.scalafix.ScalafixModule import mill._ -import mill.api.JarManifest import mill.define.NamedTask import mill.main.Tasks import mill.scalalib._ import mill.scalalib.api.ZincWorkerUtil import mill.scalalib.publish._ -import mill.util.Jvm import mill.resolve.SelectMode import mill.T import mill.define.Cross @@ -20,6 +18,7 @@ import mill.define.Cross import $meta._ import $file.ci.shared import $file.ci.upload +import $packages._ object Settings { val pomOrg = "com.lihaoyi" @@ -31,20 +30,21 @@ object Settings { val docUrl = "https://mill-build.org" // the exact branches containing a doc root val docBranches = Seq() - // the exact tags containing a doc root + // the exact tags containing a doc root. Publish docs for + // the last point version in each minor release series val legacyDocTags: Seq[String] = Seq( "0.9.12", - "0.10.0", - "0.10.12", - "0.10.15", - "0.11.0-M7" + "0.10.15" ) val docTags: Seq[String] = Seq( - "0.11.10", - "0.11.11", - "0.11.12" + "0.11.13", + "0.12.5" ) - val mimaBaseVersions: Seq[String] = 0.to(12).map("0.11." + _) + val mimaBaseVersions: Seq[String] = + 0.to(13).map("0.11." 
+ _) ++ + Seq("0.12.0", "0.12.1", "0.12.2", "0.12.3", "0.12.4", "0.12.5") + + val graalvmJvmId = "graalvm-community:23.0.1" } object Deps { @@ -52,13 +52,12 @@ object Deps { // The Scala version to use // When updating, run "Publish Bridges" Github Actions for the new version // and then add to it `bridgeScalaVersions` - val scalaVersion = "2.13.14" - // Scoverage 1.x will not get releases for newer Scala versions - val scalaVersionForScoverageWorker1 = "2.13.8" + val scalaVersion = "2.13.15" + val scala2Version = "2.13.15" // The Scala 2.12.x version to use for some workers - val workerScalaVersion212 = "2.12.19" + val workerScalaVersion212 = "2.12.20" - val testScala213Version = "2.13.14" + val testScala213Version = "2.13.15" // Scala Native 4.2 will not get releases for new Scala version val testScala213VersionForScalaNative42 = "2.13.8" val testScala212Version = "2.12.6" @@ -66,7 +65,7 @@ object Deps { val testScala33Version = "3.3.1" object Scalajs_1 { - val scalaJsVersion = "1.16.0" + val scalaJsVersion = "1.17.0" val scalajsEnvJsdomNodejs = ivy"org.scala-js::scalajs-env-jsdom-nodejs:1.1.0" val scalajsEnvExoegoJsdomNodejs = ivy"net.exoego::scalajs-env-jsdom-nodejs:2.1.0" val scalajsEnvNodejs = ivy"org.scala-js::scalajs-env-nodejs:1.4.0" @@ -78,7 +77,7 @@ object Deps { } object Scalanative_0_5 { - val scalanativeVersion = "0.5.4" + val scalanativeVersion = "0.5.6" val scalanativeTools = ivy"org.scala-native::tools:${scalanativeVersion}" val scalanativeUtil = ivy"org.scala-native::util:${scalanativeVersion}" val scalanativeNir = ivy"org.scala-native::nir:${scalanativeVersion}" @@ -106,21 +105,23 @@ object Deps { val playVersion = "2.8.22" } object Play_2_9 extends Play { - val playVersion = "2.9.5" + val playVersion = "2.9.6" } object Play_3_0 extends Play { - val playVersion = "3.0.5" + val playVersion = "3.0.6" } val play = Seq(Play_3_0, Play_2_9, Play_2_8, Play_2_7, Play_2_6).map(p => (p.playBinVersion, p)).toMap - val acyclic = 
ivy"com.lihaoyi:::acyclic:0.3.12" - val ammoniteVersion = "3.0.0-M2-15-9bed9700" - val asmTree = ivy"org.ow2.asm:asm-tree:9.7" + val acyclic = ivy"com.lihaoyi:::acyclic:0.3.15" + val ammoniteVersion = "3.0.0-2-6342755f" + val asmTree = ivy"org.ow2.asm:asm-tree:9.7.1" val bloopConfig = ivy"ch.epfl.scala::bloop-config:1.5.5" - val coursier = ivy"io.get-coursier::coursier:2.1.12" - val coursierInterface = ivy"io.get-coursier:interface:1.0.19" + val coursierVersion = "2.1.24" + val coursier = ivy"io.get-coursier::coursier:$coursierVersion" + val coursierInterface = ivy"io.get-coursier:interface:1.0.27" + val coursierJvm = ivy"io.get-coursier::coursier-jvm:$coursierVersion" val cask = ivy"com.lihaoyi::cask:0.9.4" val castor = ivy"com.lihaoyi::castor:0.3.0" @@ -129,29 +130,29 @@ object Deps { val graphvizJava = Seq( ivy"guru.nidi:graphviz-java-min-deps:0.18.1", ivy"org.webjars.npm:viz.js-graphviz-java:2.1.3", - ivy"org.apache.xmlgraphics:batik-rasterizer:1.17" + ivy"org.apache.xmlgraphics:batik-rasterizer:1.18" ) - val junixsocket = ivy"com.kohlschutter.junixsocket:junixsocket-core:2.10.0" val jgraphtCore = ivy"org.jgrapht:jgrapht-core:1.4.0" // 1.5.0+ dont support JDK8 val javet = Seq( - ivy"com.caoccao.javet:javet:3.1.6", - ivy"com.caoccao.javet:javet-linux-arm64:3.1.6", - ivy"com.caoccao.javet:javet-macos:3.1.6" + ivy"com.caoccao.javet:javet:4.0.0", + ivy"com.caoccao.javet:javet-linux-arm64:4.0.0", + ivy"com.caoccao.javet:javet-macos:4.0.0" ) - val jline = ivy"org.jline:jline:3.26.3" - val jnaVersion = "5.14.0" + val jline = ivy"org.jline:jline:3.28.0" + val jnaVersion = "5.16.0" + val jna = ivy"net.java.dev.jna:jna:${jnaVersion}" val jnaPlatform = ivy"net.java.dev.jna:jna-platform:${jnaVersion}" val junitInterface = ivy"com.github.sbt:junit-interface:0.13.3" - val commonsIO = ivy"commons-io:commons-io:2.16.1" - val log4j2Core = ivy"org.apache.logging.log4j:log4j-core:2.23.1" - val osLib = ivy"com.lihaoyi::os-lib:0.10.7" + val commonsIo = 
ivy"commons-io:commons-io:2.18.0" + val log4j2Core = ivy"org.apache.logging.log4j:log4j-core:2.24.3" + val osLib = ivy"com.lihaoyi::os-lib:0.11.4-M4" val pprint = ivy"com.lihaoyi::pprint:0.9.0" - val mainargs = ivy"com.lihaoyi::mainargs:0.7.4" - val millModuledefsVersion = "0.11.0-M2" + val mainargs = ivy"com.lihaoyi::mainargs:0.7.6" + val millModuledefsVersion = "0.11.2" val millModuledefsString = s"com.lihaoyi::mill-moduledefs:${millModuledefsVersion}" val millModuledefs = ivy"${millModuledefsString}" val millModuledefsPlugin = @@ -160,12 +161,10 @@ object Deps { val testng = ivy"org.testng:testng:7.5.1" val sbtTestInterface = ivy"org.scala-sbt:test-interface:1.0" def scalaCompiler(scalaVersion: String) = ivy"org.scala-lang:scala-compiler:${scalaVersion}" - // last scalafmt release supporting Java 8 is 3.7.15 - val scalafmtDynamic = ivy"org.scalameta::scalafmt-dynamic:3.7.15" // scala-steward:off + val scalafmtDynamic = ivy"org.scalameta::scalafmt-dynamic:3.8.3" def scalap(scalaVersion: String) = ivy"org.scala-lang:scalap:${scalaVersion}" def scalaReflect(scalaVersion: String) = ivy"org.scala-lang:scala-reflect:${scalaVersion}" - val scalacScoveragePlugin = ivy"org.scoverage:::scalac-scoverage-plugin:1.4.11" - val scoverage2Version = "2.1.1" + val scoverage2Version = "2.2.1" val scalacScoverage2Plugin = ivy"org.scoverage:::scalac-scoverage-plugin:${scoverage2Version}" val scalacScoverage2Reporter = ivy"org.scoverage::scalac-scoverage-reporter:${scoverage2Version}" val scalacScoverage2Domain = ivy"org.scoverage::scalac-scoverage-domain:${scoverage2Version}" @@ -175,45 +174,90 @@ object Deps { val scalatags = ivy"com.lihaoyi::scalatags:0.12.0" def scalaXml = ivy"org.scala-lang.modules::scala-xml:2.3.0" // keep in sync with doc/antora/antory.yml - val semanticDBscala = ivy"org.scalameta:::semanticdb-scalac:4.9.9" - val semanticDbJava = ivy"com.sourcegraph:semanticdb-java:0.10.0" - val sourcecode = ivy"com.lihaoyi::sourcecode:0.3.1" + val semanticDBscala = 
ivy"org.scalameta:::semanticdb-scalac:4.12.3" + val semanticDbJava = ivy"com.sourcegraph:semanticdb-java:0.10.3" + val sourcecode = ivy"com.lihaoyi::sourcecode:0.4.2" val upickle = ivy"com.lihaoyi::upickle:3.3.1" - val windowsAnsi = ivy"io.github.alexarchambault.windows-ansi:windows-ansi:0.0.5" - val zinc = ivy"org.scala-sbt::zinc:1.10.1" + val windowsAnsi = ivy"io.github.alexarchambault.windows-ansi:windows-ansi:0.0.6" + val zinc = ivy"org.scala-sbt::zinc:1.10.7" // keep in sync with doc/antora/antory.yml val bsp4j = ivy"ch.epfl.scala:bsp4j:2.2.0-M2" val fansi = ivy"com.lihaoyi::fansi:0.5.0" val jarjarabrams = ivy"com.eed3si9n.jarjarabrams::jarjar-abrams-core:1.14.0" val requests = ivy"com.lihaoyi::requests:0.9.0" - val logback = ivy"ch.qos.logback:logback-classic:1.5.7" + val logback = ivy"ch.qos.logback:logback-classic:1.5.16" val sonatypeCentralClient = ivy"com.lumidion::sonatype-central-client-requests:0.3.0" + val kotlinVersion = "2.0.21" + val kotlinCompiler = ivy"org.jetbrains.kotlin:kotlin-compiler:$kotlinVersion" + val mavenVersion = "3.9.9" + val mavenEmbedder = ivy"org.apache.maven:maven-embedder:$mavenVersion" + val mavenResolverVersion = "1.9.22" + val mavenResolverConnectorBasic = + ivy"org.apache.maven.resolver:maven-resolver-connector-basic:$mavenResolverVersion" + val mavenResolverSupplier = + ivy"org.apache.maven.resolver:maven-resolver-supplier:$mavenResolverVersion" + val mavenResolverTransportFile = + ivy"org.apache.maven.resolver:maven-resolver-transport-file:$mavenResolverVersion" + val mavenResolverTransportHttp = + ivy"org.apache.maven.resolver:maven-resolver-transport-http:$mavenResolverVersion" + val mavenResolverTransportWagon = + ivy"org.apache.maven.resolver:maven-resolver-transport-wagon:$mavenResolverVersion" + val coursierJvmIndexVersion = "0.0.4-70-51469f" object RuntimeDeps { + val dokkaVersion = "2.0.0" + val koverVersion = "0.8.3" + + val detektCli = ivy"io.gitlab.arturbosch.detekt:detekt-cli:1.23.7" + val 
dokkaAnalysisDescriptors = + ivy"org.jetbrains.dokka:analysis-kotlin-descriptors:$dokkaVersion" + val dokkaBase = ivy"org.jetbrains.dokka:dokka-base:$dokkaVersion" + val dokkaCli = ivy"org.jetbrains.dokka:dokka-cli:$dokkaVersion" val errorProneCore = ivy"com.google.errorprone:error_prone_core:2.31.0" - val jupiterInterface = ivy"com.github.sbt.junit:jupiter-interface:0.11.4" + val freemarker = ivy"org.freemarker:freemarker:2.3.34" + val jupiterInterface = ivy"com.github.sbt.junit:jupiter-interface:0.13.3" + val kotlinxHtmlJvm = ivy"org.jetbrains.kotlinx:kotlinx-html-jvm:0.11.0" + val koverCli = ivy"org.jetbrains.kotlinx:kover-cli:$koverVersion" + val koverJvmAgent = ivy"org.jetbrains.kotlinx:kover-jvm-agent:$koverVersion" + val ktfmt = ivy"com.facebook:ktfmt:0.53" + val ktlint = ivy"com.pinterest.ktlint:ktlint-core:0.49.1" val sbtTestInterface = ivy"com.github.sbt:junit-interface:0.13.2" - def all = Seq(errorProneCore, jupiterInterface, sbtTestInterface) + + def all = Seq( + detektCli, + dokkaAnalysisDescriptors, + dokkaBase, + dokkaCli, + errorProneCore, + freemarker, + jupiterInterface, + kotlinxHtmlJvm, + koverCli, + koverJvmAgent, + ktfmt, + ktlint, + sbtTestInterface + ) } /** Used to manage transitive versions. */ - val transitiveDeps = Seq( - ivy"org.apache.ant:ant:1.10.14", - ivy"commons-io:commons-io:2.16.1", + lazy val transitiveDeps = Seq( + ivy"org.apache.ant:ant:1.10.15", + Deps.commonsIo, ivy"com.google.code.gson:gson:2.11.0", - ivy"com.google.protobuf:protobuf-java:4.28.0", - ivy"com.google.guava:guava:33.3.0-jre", - ivy"org.yaml:snakeyaml:2.2", - ivy"org.apache.commons:commons-compress:1.26.2" + ivy"com.google.protobuf:protobuf-java:4.29.2", + ivy"com.google.guava:guava:33.4.0-jre", + ivy"org.yaml:snakeyaml:2.3", + ivy"org.apache.commons:commons-compress:1.27.1" ) /** Used in tests. 
*/ object TestDeps { // tests framework (test) - val scalaCheck = ivy"org.scalacheck::scalacheck:1.18.0" + val scalaCheck = ivy"org.scalacheck::scalacheck:1.18.1" val scalaTest = ivy"org.scalatest::scalatest:3.2.19" val utest = ivy"com.lihaoyi::utest:0.8.4" - val zioTest = ivy"dev.zio::zio-test:2.0.22" + val zioTest = ivy"dev.zio::zio-test:2.1.14" } /** Used in documentation. */ @@ -222,15 +266,18 @@ object Deps { } } -def millVersion: T[String] = T { VcsVersion.vcsState().format() } +def millVersion: T[String] = Task.Input { + if (Task.env.contains("MILL_STABLE_VERSION")) VcsVersion.calcVcsState(Task.log).format() + else "SNAPSHOT" +} -def millLastTag: T[String] = T { +def millLastTag: T[String] = Task { VcsVersion.vcsState().lastTag.getOrElse( sys.error("No (last) git tag found. Your git history seems incomplete!") ) } -def millBinPlatform: T[String] = T { +def millBinPlatform: T[String] = Task { // val tag = millLastTag() // if (tag.contains("-M")) tag // else { @@ -243,7 +290,7 @@ def millBinPlatform: T[String] = T { def baseDir = build.millSourcePath val essentialBridgeScalaVersions = - Seq(Deps.scalaVersion, Deps.scalaVersionForScoverageWorker1, Deps.workerScalaVersion212) + Seq(Deps.scalaVersion, Deps.workerScalaVersion212) // published compiler bridges val bridgeScalaVersions = Seq( // Our version of Zinc doesn't work with Scala 2.12.0 and 2.12.4 compiler @@ -278,7 +325,8 @@ val bridgeScalaVersions = Seq( "2.13.11", "2.13.12", "2.13.13", - "2.13.14" + "2.13.14", + "2.13.15" ) // We limit the number of compiler bridges to compile and publish for local @@ -298,17 +346,17 @@ val bridgeVersion = "0.0.1" trait MillJavaModule extends JavaModule { // Test setup - def testDep = T { (s"com.lihaoyi-${artifactId()}", testDepPaths().map(_.path).mkString("\n")) } + def testDep = Task { (s"com.lihaoyi-${artifactId()}", testDepPaths().map(_.path).mkString("\n")) } // Workaround for Zinc/JNA bug // 
https://github.com/sbt/sbt/blame/6718803ee6023ab041b045a6988fafcfae9d15b5/main/src/main/scala/sbt/Main.scala#L130 - def testArgs: T[Seq[String]] = T { Seq("-Djna.nosys=true") } - def testDepPaths = T { upstreamAssemblyClasspath() ++ Seq(compile().classes) ++ resources() } + def testArgs: T[Seq[String]] = Task { Seq("-Djna.nosys=true") } + def testDepPaths = Task { upstreamAssemblyClasspath() ++ Seq(compile().classes) ++ resources() } - def testTransitiveDeps: T[Map[String, String]] = T { - val upstream = T.traverse(moduleDeps ++ compileModuleDeps) { + def testTransitiveDeps: T[Map[String, String]] = Task { + val upstream = Task.traverse(moduleDeps ++ compileModuleDeps) { case m: MillJavaModule => m.testTransitiveDeps.map(Some(_)) - case _ => T.task(None) + case _ => Task.Anon(None) }().flatten.flatten val current = Seq(testDep()) upstream.toMap ++ current @@ -319,28 +367,28 @@ trait MillJavaModule extends JavaModule { if (this == build.main) Seq(build.main) else Seq(this, build.main.test) - def writeLocalTestOverrides = T.task { + def writeLocalTestOverrides = Task.Anon { for ((k, v) <- testTransitiveDeps()) { - os.write(T.dest / "mill" / "local-test-overrides" / k, v, createFolders = true) + os.write(Task.dest / "mill" / "local-test-overrides" / k, v, createFolders = true) } - Seq(PathRef(T.dest)) + Seq(PathRef(Task.dest)) } def runClasspath = super.runClasspath() ++ writeLocalTestOverrides() - def repositoriesTask = T.task { + def repositoriesTask = Task.Anon { super.repositoriesTask() ++ Seq(MavenRepository("https://oss.sonatype.org/content/repositories/releases")) } - def mapDependencies: Task[coursier.Dependency => coursier.Dependency] = T.task { + def mapDependencies: Task[coursier.Dependency => coursier.Dependency] = Task.Anon { super.mapDependencies().andThen { dep => forcedVersions.find(f => f.dep.module.organization.value == dep.module.organization.value && f.dep.module.name.value == dep.module.name.value ).map { forced => val newDep = 
dep.withVersion(forced.dep.version) - T.log.debug(s"Forcing version of ${dep.module} from ${dep.version} to ${newDep.version}") + Task.log.debug(s"Forcing version of ${dep.module} from ${dep.version} to ${newDep.version}") newDep }.getOrElse(dep) } @@ -349,6 +397,14 @@ trait MillJavaModule extends JavaModule { Deps.jline, Deps.jna ) + + def javadocOptions = super.javadocOptions() ++ Seq( + // Disable warnings for missing documentation comments or tags (for example, + // a missing comment or class, or a missing @return tag or similar tag on a method). + // We have many methods without JavaDoc comments, so those warnings are useless + // and significantly clutter the output. + "-Xdoclint:all,-missing" + ) } trait MillPublishJavaModule extends MillJavaModule with PublishModule { @@ -388,7 +444,12 @@ trait MillScalaModule extends ScalaModule with MillJavaModule with ScalafixModul "-P:acyclic:force", "-feature", "-Xlint:unused", - "-Xlint:adapted-args" + "-Xlint:adapted-args", + "-Xsource:3", + "-Wconf:msg=inferred type changes:silent", + "-Wconf:msg=case companions no longer extend FunctionN:silent", + "-Wconf:msg=access modifiers for:silent", + "-Wconf:msg=found in a package prefix of the required type:silent" ) def scalacPluginIvyDeps = @@ -407,11 +468,12 @@ trait MillScalaModule extends ScalaModule with MillJavaModule with ScalafixModul def moduleDeps = outer.testModuleDeps def ivyDeps = super.ivyDeps() ++ outer.testIvyDeps() def forkEnv = super.forkEnv() ++ outer.forkEnv() + override def testForkGrouping = discoveredTestClasses().grouped(1).toSeq } } trait MillBaseTestsModule extends TestModule { - def forkArgs = T { + def forkArgs = Task { Seq( s"-DMILL_SCALA_2_13_VERSION=${Deps.scalaVersion}", s"-DMILL_SCALA_2_12_VERSION=${Deps.workerScalaVersion212}", @@ -426,7 +488,8 @@ trait MillBaseTestsModule extends TestModule { s"-DTEST_SCALATEST_VERSION=${Deps.TestDeps.scalaTest.dep.version}", s"-DTEST_TEST_INTERFACE_VERSION=${Deps.sbtTestInterface.dep.version}", 
s"-DTEST_ZIOTEST_VERSION=${Deps.TestDeps.zioTest.dep.version}", - s"-DTEST_ZINC_VERSION=${Deps.zinc.dep.version}" + s"-DTEST_ZINC_VERSION=${Deps.zinc.dep.version}", + s"-DTEST_KOTLIN_VERSION=${Deps.kotlinCompiler.dep.version}" ) } @@ -443,7 +506,9 @@ trait MillStableScalaModule extends MillPublishScalaModule with Mima { // (5x) MIMA doesn't properly ignore things which are nested inside other private things // so we have to put explicit ignores here (https://github.com/lightbend/mima/issues/771) ProblemFilter.exclude[Problem]("mill.eval.ProfileLogger*"), + ProblemFilter.exclude[Problem]("mill.eval.ChromeProfileLogger*"), ProblemFilter.exclude[Problem]("mill.eval.GroupEvaluator*"), + ProblemFilter.exclude[Problem]("mill.eval.EvaluatorCore*"), ProblemFilter.exclude[Problem]("mill.eval.Tarjans*"), ProblemFilter.exclude[Problem]("mill.define.Ctx#Impl*"), ProblemFilter.exclude[Problem]("mill.resolve.ResolveNotFoundHandler*"), @@ -527,11 +592,16 @@ trait MillStableScalaModule extends MillPublishScalaModule with Mima { // https://github.com/com-lihaoyi/mill/pull/3503 ProblemFilter.exclude[ReversedMissingMethodProblem]( "mill.scalalib.ScalaModule#ScalaTests.mill$scalalib$ScalaModule$ScalaTests$$super$mandatoryScalacOptions" - ) + ), + // Not sure why Mima is complaining when these are internal and private + ProblemFilter.exclude[Problem]("*.bspJvmBuildTarget"), + ProblemFilter.exclude[Problem]("mill.scalalib.RunModule#RunnerImpl.*"), + ProblemFilter.exclude[Problem]("mill.util.PromptLogger#*"), + ProblemFilter.exclude[Problem]("mill.util.PromptLoggerUtil.*") ) def mimaPreviousVersions: T[Seq[String]] = Settings.mimaBaseVersions - def mimaPreviousArtifacts: T[Agg[Dep]] = T { + def mimaPreviousArtifacts: T[Agg[Dep]] = Task { Agg.from( Settings.mimaBaseVersions .filter(v => !skipPreviousVersions().contains(v)) @@ -559,28 +629,29 @@ trait BridgeModule extends MillPublishJavaModule with CrossScalaModule { ivy"org.scala-lang:scala-compiler:${crossScalaVersion}" ) - def resources 
= T.sources { - os.copy(generatedSources().head.path / "META-INF", T.dest / "META-INF") - Seq(PathRef(T.dest)) + def resources = Task { + os.copy(generatedSources().head.path / "META-INF", Task.dest / "META-INF") + Seq(PathRef(Task.dest)) } def compilerBridgeIvyDeps: T[Agg[Dep]] = Agg( ivy"org.scala-sbt::compiler-bridge:${Deps.zinc.version}".exclude("*" -> "*") ) - def compilerBridgeSourceJars: T[Agg[PathRef]] = T { + def compilerBridgeSourceJars: T[Agg[PathRef]] = Task { resolveDeps( - T.task { compilerBridgeIvyDeps().map(bindDependency()) }, + Task.Anon { compilerBridgeIvyDeps().map(bindDependency()) }, sources = true )() } - def generatedSources = T { + def generatedSources = Task { + compilerBridgeSourceJars().foreach { jar => - mill.api.IO.unpackZip(jar.path, os.rel) + os.unzip(jar.path, Task.dest) } - Seq(PathRef(T.dest)) + Seq(PathRef(Task.dest)) } } @@ -589,406 +660,12 @@ def formatDep(dep: Dep) = { s"${d.module.organization.value}:${d.module.name.value}:${d.version}" } -val DefaultLocalMillReleasePath = - s"target/mill-release${if (scala.util.Properties.isWin) ".bat" else ""}" - def listIn(path: os.Path) = interp.watchValue(os.list(path).map(_.last)) -def launcherScript( - shellJvmArgs: Seq[String], - cmdJvmArgs: Seq[String], - shellClassPath: Agg[String], - cmdClassPath: Agg[String] -) = { - - val millMainClass = "mill.runner.client.MillClientMain" - - Jvm.universalScript( - shellCommands = { - val jvmArgsStr = shellJvmArgs.mkString(" ") - def java(mainClass: String, passMillJvmOpts: Boolean) = { - val millJvmOpts = if (passMillJvmOpts) "$mill_jvm_opts" else "" - s"""exec "$$JAVACMD" $jvmArgsStr $$JAVA_OPTS $millJvmOpts -cp "${shellClassPath.mkString( - ":" - )}" $mainClass "$$@"""" - } - - s"""if [ -z "$$JAVA_HOME" ] ; then - | JAVACMD="java" - |else - | JAVACMD="$$JAVA_HOME/bin/java" - |fi - | - |mill_jvm_opts="" - |init_mill_jvm_opts () { - | if [ -z $$MILL_JVM_OPTS_PATH ] ; then - | mill_jvm_opts_file=".mill-jvm-opts" - | else - | 
mill_jvm_opts_file=$$MILL_JVM_OPTS_PATH - | fi - | - | if [ -f "$$mill_jvm_opts_file" ] ; then - | # We need to append a newline at the end to fix - | # https://github.com/com-lihaoyi/mill/issues/2140 - | newline=" - |" - | mill_jvm_opts="$$( - | echo "$$newline" | cat "$$mill_jvm_opts_file" - | ( - | while IFS= read line - | do - | mill_jvm_opts="$${mill_jvm_opts} $$(echo $$line | grep -v "^[[:space:]]*[#]")" - | done - | # we are in a sub-shell, so need to return it explicitly - | echo "$${mill_jvm_opts}" - | ) - | )" - | mill_jvm_opts="$${mill_jvm_opts} -Dmill.jvm_opts_applied=true" - | fi - |} - | - |# Client-server mode doesn't seem to work on WSL, just disable it for now - |# https://stackoverflow.com/a/43618657/871202 - |if grep -qEi "(Microsoft|WSL)" /proc/version > /dev/null 2> /dev/null ; then - | init_mill_jvm_opts - | if [ -z $$COURSIER_CACHE ] ; then - | COURSIER_CACHE=.coursier - | fi - | ${java(millMainClass, true)} - |else - | if [ "$${1%"-i"*}" != "$$1" ] ; then # first arg starts with "-i" - | init_mill_jvm_opts - | ${java(millMainClass, true)} - | else - | case "$$1" in - | -i | --interactive | --repl | --no-server | --bsp ) - | init_mill_jvm_opts - | ${java(millMainClass, true)} - | ;; - | *) - | ${java(millMainClass, false)} - | ;; - | esac - | fi - |fi - |""".stripMargin - }, - cmdCommands = { - val jvmArgsStr = cmdJvmArgs.mkString(" ") - def java(mainClass: String, passMillJvmOpts: Boolean) = { - val millJvmOpts = if (passMillJvmOpts) "!mill_jvm_opts!" 
else "" - s""""%JAVACMD%" $jvmArgsStr %JAVA_OPTS% $millJvmOpts -cp "${cmdClassPath.mkString( - ";" - )}" $mainClass %*""" - } - - s"""setlocal EnableDelayedExpansion - |set "JAVACMD=java.exe" - |if not "%JAVA_HOME%"=="" set "JAVACMD=%JAVA_HOME%\\bin\\java.exe" - |if "%1" == "-i" set _I_=true - |if "%1" == "--interactive" set _I_=true - |if "%1" == "--repl" set _I_=true - |if "%1" == "--no-server" set _I_=true - |if "%1" == "--bsp" set _I_=true - | - |set "mill_jvm_opts=" - |set "mill_jvm_opts_file=.mill-jvm-opts" - |if not "%MILL_JVM_OPTS_PATH%"=="" set "mill_jvm_opts_file=%MILL_JVM_OPTS_PATH%" - | - |if defined _I_ ( - | if exist %mill_jvm_opts_file% ( - | for /f "delims=" %%G in (%mill_jvm_opts_file%) do ( - | set line=%%G - | if "!line:~0,2!"=="-X" set "mill_jvm_opts=!mill_jvm_opts! !line!" - | ) - | ) - | ${java(millMainClass, true)} - |) else ( - | ${java(millMainClass, false)} - |) - |endlocal - |""".stripMargin - } - ) -} object idea extends MillPublishScalaModule { def moduleDeps = Seq(build.scalalib, build.runner) } -/** - * Version of [[dist]] meant for local integration testing within the Mill - * repo. Looks mostly the same as [[dist]], except it does not have a reference - * to itself in its [[testTransitiveDeps]], to avoid a circular dependency. 
- */ -object dist0 extends MillPublishJavaModule { - // disable scalafix here because it crashes when a module has no sources - def fix(args: String*): Command[Unit] = T.command {} - def moduleDeps = Seq(build.runner, idea) - - def testTransitiveDeps = build.runner.testTransitiveDeps() ++ Seq( - build.main.graphviz.testDep(), - build.runner.linenumbers.testDep(), - build.scalalib.backgroundwrapper.testDep(), - build.contrib.bloop.testDep(), - build.contrib.buildinfo.testDep(), - build.contrib.scoverage.testDep(), - build.contrib.scoverage.worker2.testDep(), - build.contrib.jmh.testDep(), - build.contrib.playlib.testDep(), - build.contrib.playlib.worker("2.8").testDep(), - build.contrib.errorprone.testDep(), - build.contrib.checkstyle.testDep(), - build.bsp.worker.testDep(), - build.testkit.testDep() - ) -} - -object dist extends MillPublishJavaModule { - def jar = rawAssembly() - def moduleDeps = Seq(build.runner, idea) - - def testTransitiveDeps = dist0.testTransitiveDeps() ++ Seq( - (s"com.lihaoyi-${dist.artifactId()}", dist0.runClasspath().map(_.path).mkString("\n")) - ) - - def genTask(m: ScalaModule) = T.task { Seq(m.jar(), m.sourceJar()) ++ m.runClasspath() } - - def forkArgs: T[Seq[String]] = T { - val genIdeaArgs = - genTask(build.main.define)() ++ - genTask(build.main.eval)() ++ - genTask(build.main)() ++ - genTask(build.scalalib)() ++ - genTask(build.scalajslib)() ++ - genTask(build.scalanativelib)() - - testArgs() ++ - Seq( - "-DMILL_CLASSPATH=" + runClasspath().map(_.path.toString).mkString(","), - "-DMILL_BUILD_LIBRARIES=" + genIdeaArgs.map(_.path).mkString(","), - s"-DBSP4J_VERSION=${Deps.bsp4j.dep.version}" - ) - } - - def launcher = T { - val isWin = scala.util.Properties.isWin - val outputPath = T.dest / (if (isWin) "run.bat" else "run") - - os.write(outputPath, prependShellScript()) - if (!isWin) os.perms.set(outputPath, "rwxrwxrwx") - - PathRef(outputPath) - } - - def extraPublish: T[Seq[PublishInfo]] = T { - Seq(PublishInfo(file = assembly(), 
classifier = Some("assembly"), ivyConfig = "compile")) - } - - def assemblyRules = super.assemblyRules ++ Seq( - mill.scalalib.Assembly.Rule.ExcludePattern("mill/local-test-overrides/.*") - ) - - // All modules that we want to aggregate as part of this `dev` assembly. - // Excluding itself, and the `dist` module that uses it - lazy val allPublishModules = build.millInternal.modules.collect { - case m: PublishModule if (m ne this) && (m ne dist) => m - } - - def rawAssembly = T { - val version = millVersion() - val devRunClasspath = runClasspath().map(_.path) - val filename = if (scala.util.Properties.isWin) "mill.bat" else "mill" - val commonArgs = Seq( - // Workaround for Zinc/JNA bug - // https://github.com/sbt/sbt/blame/6718803ee6023ab041b045a6988fafcfae9d15b5/main/src/main/scala/sbt/Main.scala#L130 - "-Djna.nosys=true" - ) - val shellArgs = Seq("-DMILL_CLASSPATH=$0") ++ commonArgs - val cmdArgs = Seq(""""-DMILL_CLASSPATH=%~dpnx0"""") ++ commonArgs - os.move( - mill.scalalib.Assembly.createAssembly( - devRunClasspath, - prependShellScript = launcherScript(shellArgs, cmdArgs, Agg("$0"), Agg("%~dpnx0")), - assemblyRules = assemblyRules - ).path, - T.dest / filename - ) - PathRef(T.dest / filename) - } - def assembly = T { - T.traverse(allPublishModules)(m => m.publishLocalCached)() - val raw = rawAssembly().path - os.copy(raw, T.dest / raw.last) - PathRef(T.dest / raw.last) - } - - def prependShellScript = T { - val (millArgs, otherArgs) = - forkArgs().partition(arg => arg.startsWith("-DMILL") && !arg.startsWith("-DMILL_VERSION")) - // Pass Mill options via file, due to small max args limit in Windows - val vmOptionsFile = T.dest / "mill.properties" - val millOptionsContent = - millArgs.map(_.drop(2).replace("\\", "/")).mkString( - "\r\n" - ) // drop -D prefix, replace \ with / - os.write(vmOptionsFile, millOptionsContent) - val jvmArgs = otherArgs ++ List(s"-DMILL_OPTIONS_PATH=$vmOptionsFile") - val classpath = runClasspath().map(_.path.toString) - 
launcherScript( - jvmArgs, - jvmArgs, - classpath, - Agg(pathingJar().path.toString) // TODO not working yet on Windows! see #791 - ) - } - - def pathingJar = T { - // see http://todayguesswhat.blogspot.com/2011/03/jar-manifestmf-class-path-referencing.html - // for more detailed explanation - val isWin = scala.util.Properties.isWin - val classpath = runClasspath().map { pathRef => - val path = - if (isWin) "/" + pathRef.path.toString.replace("\\", "/") - else pathRef.path.toString - if (path.endsWith(".jar")) path - else path + "/" - }.mkString(" ") - val manifestEntries = Map[String, String]( - java.util.jar.Attributes.Name.MANIFEST_VERSION.toString -> "1.0", - "Created-By" -> "Scala mill", - "Class-Path" -> classpath - ) - Jvm.createJar(Agg(), JarManifest(manifestEntries)) - } - - def run(args: Task[Args] = T.task(Args())) = T.command { - args().value match { - case Nil => mill.api.Result.Failure("Need to pass in cwd as first argument to dev.run") - case wd0 +: rest => - val wd = os.Path(wd0, T.workspace) - os.makeDir.all(wd) - try { - Jvm.runSubprocess( - Seq(launcher().path.toString) ++ rest, - forkEnv(), - workingDir = wd - ) - mill.api.Result.Success(()) - } catch { - case e: Throwable => - mill.api.Result.Failure(s"dev.run failed with an exception. ${e.getMessage()}") - } - } - } -} - -/** - * Build and install Mill locally. 
- * - * @param binFile The location where the Mill binary should be installed - * @param ivyRepo The local Ivy repository where Mill modules should be published to - */ -def installLocal(binFile: String = DefaultLocalMillReleasePath, ivyRepo: String = null) = - T.command { - PathRef(installLocalTask(T.task(binFile), ivyRepo)()) - } - -def installLocalCache() = T.command { - val path = installLocalTask( - T.task((os.home / ".cache" / "mill" / "download" / millVersion()).toString()) - )() - T.log.outputStream.println(path.toString()) - PathRef(path) -} - -def installLocalTask(binFile: Task[String], ivyRepo: String = null): Task[os.Path] = T.task { - val millBin = dist.assembly() - val targetFile = os.Path(binFile(), T.workspace) - if (os.exists(targetFile)) - T.log.info(s"Overwriting existing local Mill binary at ${targetFile}") - os.copy.over(millBin.path, targetFile, createFolders = true) - T.log.info(s"Published ${dist.allPublishModules.size} modules and installed ${targetFile}") - targetFile -} - -def millBootstrap = T.sources(T.workspace / "mill") - -def bootstrapLauncher = T { - val outputPath = T.dest / "mill" - val millBootstrapGrepPrefix = "(\n *DEFAULT_MILL_VERSION=)" - val millDownloadUrlPrefix = "(\n *MILL_DOWNLOAD_URL=)" - - os.write( - outputPath, - os.read(millBootstrap().head.path) - .replaceAll( - millBootstrapGrepPrefix + "[^\\n]+", - "$1" + millVersion() - ) - ) - os.perms.set(outputPath, "rwxrwxrwx") - PathRef(outputPath) -} - -def exampleZips: T[Seq[PathRef]] = T { - for { - exampleMod <- build.example.exampleModules - examplePath = exampleMod.millSourcePath - } yield { - val example = examplePath.subRelativeTo(T.workspace) - val exampleStr = millVersion() + "-" + example.segments.mkString("-") - os.copy(examplePath, T.dest / exampleStr, createFolders = true) - os.write(T.dest / exampleStr / ".mill-version", millLastTag()) - os.copy(bootstrapLauncher().path, T.dest / exampleStr / "mill") - val zip = T.dest / s"$exampleStr.zip" - os.proc("zip", 
"-r", zip, exampleStr).call(cwd = T.dest) - PathRef(zip) - } -} - -def uploadToGithub(authKey: String) = T.command { - val vcsState = VcsVersion.vcsState() - val label = vcsState.format() - if (label != millVersion()) sys.error("Modified mill version detected, aborting upload") - val releaseTag = vcsState.lastTag.getOrElse(sys.error( - "Incomplete git history. No tag found.\nIf on CI, make sure your git checkout job includes enough history." - )) - - if (releaseTag == label) { - // TODO: check if the tag already exists (e.g. because we created it manually) and do not fail - scalaj.http.Http( - s"https://api.github.com/repos/${Settings.githubOrg}/${Settings.githubRepo}/releases" - ) - .postData( - ujson.write( - ujson.Obj( - "tag_name" -> releaseTag, - "name" -> releaseTag - ) - ) - ) - .header("Authorization", "token " + authKey) - .asString - } - - val examples = exampleZips().map(z => (z.path, z.path.last)) - - val zips = examples ++ Seq( - (dist.assembly().path, label + "-assembly"), - (bootstrapLauncher().path, label) - ) - - for ((zip, name) <- zips) { - upload.apply( - zip, - releaseTag, - name, - authKey, - Settings.githubOrg, - Settings.githubRepo - ) - } -} - private def resolveTasks[T](taskNames: String*): Seq[NamedTask[T]] = { mill.resolve.Resolve.Tasks.resolve( build, @@ -1001,8 +678,8 @@ def validate(): Command[Unit] = { val tasks = resolveTasks("__.compile", "__.minaReportBinaryIssues") val sources = resolveTasks("__.sources") - T.command { - T.sequence(tasks)() + Task.Command { + Task.sequence(tasks)() mill.scalalib.scalafmt.ScalafmtModule.checkFormatAll(Tasks(sources))() build.docs.localPages() () @@ -1022,7 +699,9 @@ val dummyDeps: Seq[Dep] = Seq( implicit object DepSegment extends Cross.ToSegments[Dep]({ dep => val depString = formatDep(dep) - List(depString) + // these cross module names cause problems on windows, and anyway they + // are not necessary in order to load the project into IntelliJ anyway + List(depString.replace(':', '_')) }) /** 
diff --git a/ci/mill.ps1 b/ci/mill.ps1 new file mode 100644 index 00000000000..51fc8fee6ee --- /dev/null +++ b/ci/mill.ps1 @@ -0,0 +1,137 @@ +# This is a wrapper script, that automatically download mill from GitHub release pages +# You can give the required mill version with --mill-version parameter +# If no version is given, it falls back to the value of DEFAULT_MILL_VERSION +# +# Project page: https://github.com/lefou/millw +# Script Version: 0.4.12 +# +# If you want to improve this script, please also contribute your changes back! +# +# Licensed under the Apache License, Version 2.0 + +[CmdletBinding(PositionalBinding = $false)] + +param( + [Parameter(ValueFromRemainingArguments = $true, Position = 0)] + [string[]] $remainingArgs +) + +$DEFAULT_MILL_VERSION = $Env:DEFAULT_MILL_VERSION ?? '0.11.6' + +$GITHUB_RELEASE_CDN = $Env:GITHUB_RELEASE_CDN ?? '' + +$MILL_REPO_URL = 'https://github.com/com-lihaoyi/mill' + +$MILL_VERSION = $null + +if ($null -ne $remainingArgs) { + if ($remainingArgs[0] -eq '--mill-version') { + $remainingArgs = Select-Object -InputObject $remainingArgs -Skip 1 + if ($null -ne $remainingArgs) { + $MILL_VERSION = $remainingArgs[0] + $remainingArgs = Select-Object -InputObject $remainingArgs -Skip 1 + } + else { + Write-Error -Message "Please provide a version that matches one provided on $MILL_REPO_URL/releases" + throw [System.ArgumentNullException] '--mill-version' + } + } +} + +if ($null -eq $MILL_VERSION) { + if (Test-Path -Path '.mill-version' -PathType Leaf) { + $MILL_VERSION = Get-Content -Path '.mill-version' -TotalCount 1 + } + elseif (Test-Path -Path '.config/mill-version' -PathType Leaf) { + $MILL_VERSION = Get-Content -Path '.config/mill-version' -TotalCount 1 + } +} + +$MILL_USER_CACHE_DIR = Join-Path -Path $Env:LOCALAPPDATA -ChildPath 'mill' + +$MILL_DOWNLOAD_PATH = $Env:MILL_DOWNLOAD_PATH ?? 
@(Join-Path -Path ${MILL_USER_CACHE_DIR} -ChildPath 'download') + +if (-not (Test-Path -Path $MILL_DOWNLOAD_PATH)) { + New-Item -Path $MILL_DOWNLOAD_PATH -ItemType Directory | Out-Null +} + +if ($null -eq $MILL_VERSION) { + Write-Warning -Message 'No mill version specified.' + Write-Warning -Message "You should provide a version via '.mill-version' file or --mill-version option." + + if (-not (Test-Path -Path "$MILL_DOWNLOAD_PATH" -PathType Container)) { + New-Item "$MILL_DOWNLOAD_PATH" -ItemType Directory | Out-Null + } + + $MILL_LATEST_PATH = Join-Path -Path $MILL_DOWNLOAD_PATH -ChildPath '.latest' + + if (Test-Path -Path $MILL_LATEST_PATH -PathType Leaf) { + if ($(Get-Item -Path $MILL_LATEST_PATH).LastWriteTime -lt $(Get-Date).AddHours(-1)) { + $MILL_VERSION = Get-Content -Path $MILL_LATEST_PATH -TotalCount 1 + } + } + + if ($null -eq $MILL_VERSION) { + Write-Output 'Retrieving latest mill version ...' + + # https://github.com/PowerShell/PowerShell/issues/20964 + $targetUrl = try { + Invoke-WebRequest -Uri "$MILL_REPO_URL/releases/latest" -MaximumRedirection 0 + } + catch { + $_.Exception.Response.Headers.Location.AbsoluteUri + } + + $targetUrl -match "^$MILL_REPO_URL/releases/tag/(.+)$" | Out-Null + + $MILL_VERSION = $Matches.1 + + if ($null -ne $MILL_VERSION) { + Set-Content -Path $MILL_LATEST_PATH -Value $MILL_VERSION + } + } + + if ($null -eq $MILL_VERSION) { + $MILL_VERSION = $DEFAULT_MILL_VERSION + Write-Warning "Falling back to hardcoded mill version $MILL_VERSION" + } + else { + Write-Output "Using mill version $MILL_VERSION" + } +} + +$MILL = "$MILL_DOWNLOAD_PATH/$MILL_VERSION.bat" + +if (-not (Test-Path -Path $MILL -PathType Leaf)) { + $DOWNLOAD_SUFFIX, $DOWNLOAD_FROM_MAVEN = switch -Regex ($MILL_VERSION) { + '^0\.[0-4]\..*$' { '', $false } + '0\.(?:[5-9]\.|10\.|11\.0-M).*' { '-assembly', $false } + Default { '-assembly', $true } + } + + if ($DOWNLOAD_FROM_MAVEN) { + $DOWNLOAD_URL = 
"https://repo1.maven.org/maven2/com/lihaoyi/mill-dist/$MILL_VERSION/mill-dist-$MILL_VERSION.jar" + } + else { + $MILL_VERSION -match '(\d+\.\d+\.\d+(?:-M\d+)?)' | Out-Null + $MILL_VERSION_TAG = $Matches.1 + $DOWNLOAD_URL = "$GITHUB_RELEASE_CDN$MILL_REPO_URL/releases/download/$MILL_VERSION_TAG/$MILL_VERSION$DOWNLOAD_SUFFIX" + } + Write-Output "Downloading mill $MILL_VERSION from $DOWNLOAD_URL ..." + + Invoke-WebRequest -Uri $DOWNLOAD_URL -OutFile $MILL +} + +$MILL_MAIN_CLI = $Env:MILL_MAIN_CLI ?? $PSCommandPath + +$MILL_FIRST_ARG = $null +$REMAINING_ARGUMENTS = $remainingArgs + +if ($null -ne $remainingArgs) { + if ($remainingArgs[0] -eq '--bsp' -or $remainingArgs -eq '-i' -or $remainingArgs -eq '--interactive' -or $remainingArgs -eq '--no-server') { + $MILL_FIRST_ARG = $remainingArgs[0] + $REMAINING_ARGUMENTS = Select-Object -InputObject $remainingArgs -Skip 1 + } +} + +& $MILL $MILL_FIRST_ARG -D "mill.main.cli=$MILL_MAIN_CLI" $REMAINING_ARGUMENTS diff --git a/ci/package.mill b/ci/package.mill index 6a5b3e8b8eb..5449995b6dc 100644 --- a/ci/package.mill +++ b/ci/package.mill @@ -1 +1 @@ -package build.ci \ No newline at end of file +package build.ci diff --git a/ci/release-maven.sh b/ci/release-maven.sh deleted file mode 100755 index 3f0b2d1a41b..00000000000 --- a/ci/release-maven.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -set -eu - -./mill -i installLocal - -./target/mill-release -i mill.scalalib.PublishModule/publishAll \ No newline at end of file diff --git a/ci/shared.mill b/ci/shared.mill index 71447239625..d28af501855 100644 --- a/ci/shared.mill +++ b/ci/shared.mill @@ -1,48 +1,8 @@ package build.ci -/** - * Utility code that is shared between our SBT build and our Mill build. 
SBT - * calls this by shelling out to Ammonite in a subprocess, while Mill loads it - * via import $file - */ -import $ivy.`org.scalaj::scalaj-http:2.4.2` -import mainargs.main -def unpackZip(zipDest: os.Path, url: String) = { - println(s"Unpacking zip $url into $zipDest") - os.makeDir.all(zipDest) - val bytes = - scalaj.http.Http.apply(url).option(scalaj.http.HttpOptions.followRedirects(true)).asBytes - val byteStream = new java.io.ByteArrayInputStream(bytes.body) - val zipStream = new java.util.zip.ZipInputStream(byteStream) - while ({ - zipStream.getNextEntry match { - case null => false - case entry => - if (!entry.isDirectory) { - val dest = zipDest / os.SubPath(entry.getName) - os.makeDir.all(dest / os.up) - val fileOut = new java.io.FileOutputStream(dest.toString) - val buffer = new Array[Byte](4096) - while ({ - zipStream.read(buffer) match { - case -1 => false - case n => - fileOut.write(buffer, 0, n) - true - } - }) () - fileOut.close() - } - zipStream.closeEntry() - true - } - }) () -} - -@main def downloadTestRepo(label: String, commit: String, dest: os.Path) = { - unpackZip(dest, s"https://github.com/$label/archive/$commit.zip") + os.unzip.stream(requests.get.stream(s"https://github.com/$label/archive/$commit.zip"), dest) dest } diff --git a/ci/test-mill-bootstrap.sh b/ci/test-mill-bootstrap.sh index 50047066a95..aea6c4cc6b9 100755 --- a/ci/test-mill-bootstrap.sh +++ b/ci/test-mill-bootstrap.sh @@ -7,10 +7,10 @@ git stash -u git stash -a # Build Mill -./mill -i installLocal +./mill -i dist.installLocal # Clean up -git stash -a -m "preserve mill-release" -- target/mill-release +git stash -a -m "preserve mill-release" -- ./mill-assembly.jar git stash -u git stash -a git stash pop "$(git stash list | grep "preserve mill-release" | head -n1 | sed -E 's/([^:]+):.*/\1/')" @@ -19,5 +19,5 @@ git stash pop "$(git stash list | grep "preserve mill-release" | head -n1 | sed ci/prepare-mill-bootstrap.sh # Run tests -target/mill-release -i "__.compile" 
-target/mill-release -i "example.scalalib.basic[1-simple].server.test" +./mill-assembly.jar -i "__.compile" +./mill-assembly.jar -i "example.scalalib.basic[1-simple].packaged.server.test" diff --git a/ci/test-mill-dev.sh b/ci/test-mill-dev.sh index 8ffede49a8c..71c940c4d78 100755 --- a/ci/test-mill-dev.sh +++ b/ci/test-mill-dev.sh @@ -2,7 +2,7 @@ set -eux -EXAMPLE=example/scalalib/builds/9-realistic +EXAMPLE=example/scalalib/basic/6-realistic rm -rf $EXAMPLE/out diff --git a/ci/test-mill-release.sh b/ci/test-mill-release.sh index 56fd55d69c4..ba1122e9b82 100755 --- a/ci/test-mill-release.sh +++ b/ci/test-mill-release.sh @@ -3,21 +3,21 @@ set -eux # Build Mill -./mill -i dist.assembly +./mill -i dist.installLocal -EXAMPLE=example/scalalib/builds/9-realistic +EXAMPLE=example/scalalib/basic/6-realistic rm -rf $EXAMPLE/out test ! -d $EXAMPLE/out/foo/3.3.3/compile.dest test ! -f $EXAMPLE/out/bar/2.13.8/assembly.dest/out.jar -(cd $EXAMPLE && ../../../../out/dist/assembly.dest/mill -i "foo[3.3.3].run") +(cd $EXAMPLE && ../../../../mill-assembly.jar -i "foo[3.3.3].run") test -d $EXAMPLE/out/foo/3.3.3/compile.dest -(cd $EXAMPLE && ../../../../out/dist/assembly.dest/mill show "bar[2.13.8].assembly") +(cd $EXAMPLE && ../../../../mill-assembly.jar show "bar[2.13.8].assembly") test -f $EXAMPLE/out/bar/2.13.8/assembly.dest/out.jar -(cd $EXAMPLE && ../../../../out/dist/assembly.dest/mill shutdown) \ No newline at end of file +(cd $EXAMPLE && ../../../../mill-assembly.jar shutdown) \ No newline at end of file diff --git a/ci/upload.mill b/ci/upload.mill index e81ba1caf99..afd59c359cc 100644 --- a/ci/upload.mill +++ b/ci/upload.mill @@ -1,8 +1,5 @@ package build.ci -import scalaj.http._ -import mainargs.main -@main def apply( uploadedFile: os.Path, tagName: String, @@ -12,18 +9,18 @@ def apply( githubRepo: String ): String = { - val response = Http( - s"https://api.github.com/repos/${githubOrg}/${githubRepo}/releases/tags/${tagName}" + val response = requests.get( + 
s"https://api.github.com/repos/${githubOrg}/${githubRepo}/releases/tags/${tagName}", + headers = Seq( + "Authorization" -> s"token $authKey", + "Accept" -> "application/vnd.github.v3+json" + ) ) - .header("Authorization", "token " + authKey) - .header("Accept", "application/vnd.github.v3+json") - .asString - val body = response.body - val parsed = ujson.read(body) + val parsed = ujson.read(response) - println("Response code: " + response.code) - println(body) + println("Response code: " + response.statusCode) + println(response.text()) val snapshotReleaseId = parsed("id").num.toInt @@ -31,15 +28,17 @@ def apply( s"https://uploads.github.com/repos/${githubOrg}/${githubRepo}/releases/" + s"$snapshotReleaseId/assets?name=$uploadName" - val res = Http(uploadUrl) - .header("Content-Type", "application/octet-stream") - .header("Authorization", "token " + authKey) - .timeout(connTimeoutMs = 5000, readTimeoutMs = 60000) - .postData(os.read.bytes(uploadedFile)) - .asString + val res = requests.post( + uploadUrl, + headers = Seq( + "Content-Type" -> "application/octet-stream", + "Authorization" -> s"token $authKey" + ), + data = os.read.stream(uploadedFile) + ) - println(res.body) - val longUrl = ujson.read(res.body)("browser_download_url").str + println(res.text()) + val longUrl = ujson.read(res)("browser_download_url").str println("Long Url " + longUrl) longUrl diff --git a/contrib/artifactory/src/mill/contrib/artifactory/ArtifactoryPublishModule.scala b/contrib/artifactory/src/mill/contrib/artifactory/ArtifactoryPublishModule.scala index 4c3e40b8787..aa12ccec721 100644 --- a/contrib/artifactory/src/mill/contrib/artifactory/ArtifactoryPublishModule.scala +++ b/contrib/artifactory/src/mill/contrib/artifactory/ArtifactoryPublishModule.scala @@ -27,7 +27,7 @@ trait ArtifactoryPublishModule extends PublishModule { artifactorySnapshotUri: String = artifactorySnapshotUri, readTimeout: Int = 60000, connectTimeout: Int = 5000 - ): define.Command[Unit] = T.command { + ): 
define.Command[Unit] = Task.Command { val PublishModule.PublishData(artifactInfo, artifacts) = publishArtifacts() new ArtifactoryPublisher( artifactoryUri, @@ -59,7 +59,7 @@ object ArtifactoryPublishModule extends ExternalModule { publishArtifacts: mill.main.Tasks[PublishModule.PublishData], readTimeout: Int = 60000, connectTimeout: Int = 5000 - ) = T.command { + ) = Task.Command { val artifacts = T.sequence(publishArtifacts.value)().map { case data @ PublishModule.PublishData(_, _) => data.withConcretePath @@ -76,7 +76,7 @@ object ArtifactoryPublishModule extends ExternalModule { ) } - private def checkArtifactoryCreds(credentials: String): Task[String] = T.task { + private def checkArtifactoryCreds(credentials: String): Task[String] = Task.Anon { if (credentials.isEmpty) { (for { username <- T.env.get("ARTIFACTORY_USERNAME") diff --git a/contrib/bintray/readme.adoc b/contrib/bintray/readme.adoc index 1028b1c3544..e7909b1d4b3 100644 --- a/contrib/bintray/readme.adoc +++ b/contrib/bintray/readme.adoc @@ -34,7 +34,7 @@ import mill.contrib.bintray.BintrayPublishModule object mymodule extends BintrayPublishModule { def bintrayOwner = "owner" def bintrayRepo = "repo" - def bintrayPackage = T {...} + def bintrayPackage = Task {...} ... 
} diff --git a/contrib/bintray/src/mill/contrib/bintray/BintrayPublishModule.scala b/contrib/bintray/src/mill/contrib/bintray/BintrayPublishModule.scala index 1fd35ceb8cd..bcd0448c21d 100644 --- a/contrib/bintray/src/mill/contrib/bintray/BintrayPublishModule.scala +++ b/contrib/bintray/src/mill/contrib/bintray/BintrayPublishModule.scala @@ -12,9 +12,9 @@ trait BintrayPublishModule extends PublishModule { def bintrayRepo: String - def bintrayPackage = T { artifactId() } + def bintrayPackage: T[String] = Task { artifactId() } - def bintrayPublishArtifacts: T[BintrayPublishData] = T { + def bintrayPublishArtifacts: T[BintrayPublishData] = Task { val PublishModule.PublishData(artifactInfo, artifacts) = publishArtifacts() BintrayPublishData(artifactInfo, artifacts, bintrayPackage()) } @@ -36,7 +36,7 @@ trait BintrayPublishModule extends PublishModule { release: Boolean = true, readTimeout: Int = 60000, connectTimeout: Int = 5000 - ): define.Command[Unit] = T.command { + ): define.Command[Unit] = Task.Command { new BintrayPublisher( bintrayOwner, bintrayRepo, @@ -69,7 +69,7 @@ object BintrayPublishModule extends ExternalModule { publishArtifacts: mill.main.Tasks[BintrayPublishData], readTimeout: Int = 60000, connectTimeout: Int = 5000 - ) = T.command { + ) = Task.Command { new BintrayPublisher( bintrayOwner, bintrayRepo, @@ -83,7 +83,7 @@ object BintrayPublishModule extends ExternalModule { ) } - private def checkBintrayCreds(credentials: String): Task[String] = T.task { + private def checkBintrayCreds(credentials: String): Task[String] = Task.Anon { if (credentials.isEmpty) { (for { username <- T.env.get("BINTRAY_USERNAME") diff --git a/contrib/bloop/readme.adoc b/contrib/bloop/readme.adoc index 7503ac632da..1eed3e5b27e 100644 --- a/contrib/bloop/readme.adoc +++ b/contrib/bloop/readme.adoc @@ -33,7 +33,7 @@ import mill.scalalib._ import mill.contrib.bloop.Bloop object MyModule extends ScalaModule with Bloop.Module { - def myTask = T { bloop.config() } + def myTask = 
Task { bloop.config() } } ---- @@ -46,5 +46,6 @@ located inside a project workspace. == Note regarding current mill support in bloop -The mill-bloop integration currently present in the https://github.com/scalacenter/bloop/blob/master/integrations/mill-bloop/src/main/scala/bloop/integrations/mill/MillBloop.scala#L10[bloop codebase] +The mill-bloop integration currently present in the +https://github.com/scalacenter/bloop[bloop codebase] will be deprecated in favour of this implementation. diff --git a/contrib/bloop/src/mill/contrib/bloop/BloopImpl.scala b/contrib/bloop/src/mill/contrib/bloop/BloopImpl.scala index 2a930b2b4b0..08861925e3b 100644 --- a/contrib/bloop/src/mill/contrib/bloop/BloopImpl.scala +++ b/contrib/bloop/src/mill/contrib/bloop/BloopImpl.scala @@ -27,7 +27,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { * Generates bloop configuration files reflecting the build, * under pwd/.bloop. */ - def install() = T.command { + def install() = Task.Command { val res = T.traverse(computeModules)(_.bloop.writeConfigFile())() val written = res.map(_._2).map(_.path) // Make bloopDir if it doesn't exists @@ -61,7 +61,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { def linkerMode: T[Option[BloopConfig.LinkerMode]] = None object bloop extends MillModule { - def config = T { + def config = Task { new BloopOps(self).bloop.config() } } @@ -85,9 +85,9 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { override def millOuterCtx = jm.millOuterCtx object bloop extends MillModule { - def config = T { outer.bloopConfig(jm) } + def config = Task { outer.bloopConfig(jm) } - def writeConfigFile(): Command[(String, PathRef)] = T.command { + def writeConfigFile(): Command[(String, PathRef)] = Task.Command { os.makeDir.all(bloopDir) val path = bloopConfigPath(jm) _root_.bloop.config.write(config(), path.toNIO) @@ -96,7 +96,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: 
os.Path) extends ExternalModule { } @deprecated("Use writeConfigFile instead.", "Mill after 0.10.9") - def writeConfig: Target[(String, PathRef)] = T { + def writeConfig: T[(String, PathRef)] = Task { writeConfigFile()() } } @@ -135,10 +135,10 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { /** * Computes sources files paths for the whole project. Cached in a way - * that does not get invalidated upon sourcefile change. Mainly called + * that does not get invalidated upon source file change. Mainly called * from module#sources in bloopInstall */ - def moduleSourceMap = T.input { + def moduleSourceMap = Task.Input { val sources = T.traverse(computeModules) { m => m.allSources.map { paths => name(m) -> paths.map(_.path) @@ -161,7 +161,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { def out(m: JavaModule) = bloopDir / "out" / name(m) def classes(m: JavaModule) = out(m) / "classes" - val javaConfig = T.task { + val javaConfig = Task.Anon { val opts = module.javacOptions() ++ module.mandatoryJavacOptions() Some(Config.Java(options = opts.toList)) } @@ -172,7 +172,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { val scalaConfig = module match { case s: ScalaModule => - T.task { + Task.Anon { Some( BloopConfig.Scala( organization = s.scalaOrganization(), @@ -185,7 +185,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { ) ) } - case _ => T.task(None) + case _ => Task.Anon(None) } // ////////////////////////////////////////////////////////////////////////// @@ -194,17 +194,17 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { def jsLinkerMode(m: JavaModule): Task[Config.LinkerMode] = (m.asBloop match { - case Some(bm) => T.task(bm.linkerMode()) - case None => T.task(None) + case Some(bm) => Task.Anon(bm.linkerMode()) + case None => Task.Anon(None) }).map(_.getOrElse(Config.LinkerMode.Debug)) // 
////////////////////////////////////////////////////////////////////////// // Classpath // ////////////////////////////////////////////////////////////////////////// - val classpath = T.task { + val classpath = Task.Anon { val transitiveCompileClasspath = T.traverse(module.transitiveModuleCompileModuleDeps)(m => - T.task { m.localCompileClasspath().map(_.path) ++ Agg(classes(m)) } + Task.Anon { m.localCompileClasspath().map(_.path) ++ Agg(classes(m)) } )().flatten module.resolvedIvyDeps().map(_.path) ++ @@ -212,20 +212,20 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { module.localCompileClasspath().map(_.path) } - val runtimeClasspath = T.task { + val runtimeClasspath = Task.Anon { module.transitiveModuleDeps.map(classes) ++ module.resolvedRunIvyDeps().map(_.path) ++ module.unmanagedClasspath().map(_.path) } val compileResources = - T.task(module.compileResources().map(_.path.toNIO).toList) + Task.Anon(module.compileResources().map(_.path.toNIO).toList) val runtimeResources = - T.task(compileResources() ++ module.resources().map(_.path.toNIO).toList) + Task.Anon(compileResources() ++ module.resources().map(_.path.toNIO).toList) val platform: Task[BloopConfig.Platform] = module match { case m: ScalaJSModule => - T.task { + Task.Anon { BloopConfig.Platform.Js( BloopConfig.JsConfig.empty.copy( version = m.scalaJSVersion(), @@ -246,7 +246,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { ) } case m: ScalaNativeModule => - T.task { + Task.Anon { BloopConfig.Platform.Native( BloopConfig.NativeConfig.empty.copy( version = m.scalaNativeVersion(), @@ -270,7 +270,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { ) } case _ => - T.task { + Task.Anon { BloopConfig.Platform.Jvm( BloopConfig.JvmConfig( home = T.env.get("JAVA_HOME").map(s => os.Path(s).toNIO), @@ -298,7 +298,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { val 
testConfig = module match { case m: TestModule => - T.task { + Task.Anon { Some( BloopConfig.Test( frameworks = Seq(m.testFramework()) @@ -311,7 +311,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { ) ) } - case _ => T.task(None) + case _ => Task.Anon(None) } // ////////////////////////////////////////////////////////////////////////// @@ -389,7 +389,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { gatherTask.unsafeRun() } - val bloopResolution: Task[BloopConfig.Resolution] = T.task { + val bloopResolution: Task[BloopConfig.Resolution] = Task.Anon { val repos = module.repositoriesTask() // same as input of resolvedIvyDeps val allIvyDeps = module.transitiveIvyDeps() ++ module.transitiveCompileIvyDeps() @@ -401,7 +401,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { // Tying up // ////////////////////////////////////////////////////////////////////////// - val project = T.task { + val project = Task.Anon { val mSources = moduleSourceMap() .get(name(module)) .toSeq @@ -436,7 +436,7 @@ class BloopImpl(evs: () => Seq[Evaluator], wd: os.Path) extends ExternalModule { ) } - T.task { + Task.Anon { BloopConfig.File( version = BloopConfig.File.LatestVersion, project = project() diff --git a/contrib/buildinfo/src/mill/contrib/buildinfo/BuildInfo.scala b/contrib/buildinfo/src/mill/contrib/buildinfo/BuildInfo.scala index 094f54749ea..fbab5932054 100644 --- a/contrib/buildinfo/src/mill/contrib/buildinfo/BuildInfo.scala +++ b/contrib/buildinfo/src/mill/contrib/buildinfo/BuildInfo.scala @@ -1,6 +1,6 @@ package mill.contrib.buildinfo -import mill.T +import mill.{T, Task} import mill.api.PathRef import mill.scalalib.{JavaModule, ScalaModule} import mill.scalanativelib.ScalaNativeModule @@ -36,10 +36,10 @@ trait BuildInfo extends JavaModule { def buildInfoMembers: T[Seq[BuildInfo.Value]] = Seq.empty[BuildInfo.Value] def resources: T[Seq[PathRef]] = - if 
(buildInfoStaticCompiled) super.resources - else T.sources { super.resources() ++ Seq(buildInfoResources()) } + if (buildInfoStaticCompiled) Task { super.resources() } + else Task { super.resources() ++ Seq(buildInfoResources()) } - def buildInfoResources = T { + def buildInfoResources = Task { val p = new java.util.Properties for (v <- buildInfoMembers()) p.setProperty(v.key, v.value) @@ -59,11 +59,11 @@ trait BuildInfo extends JavaModule { private def isScala = this.isInstanceOf[ScalaModule] - override def generatedSources = T { + override def generatedSources = Task { super.generatedSources() ++ buildInfoSources() } - def buildInfoSources = T { + def buildInfoSources = Task { if (buildInfoMembers().isEmpty) Nil else { val code = if (buildInfoStaticCompiled) BuildInfo.staticCompiledCodegen( @@ -161,9 +161,10 @@ object BuildInfo { case v => if (isScala) s"""${commentStr(v)}val ${v.key} = buildInfoProperties.getProperty("${v.key}")""" - else s"""${commentStr( - v - )}public static final java.lang.String ${v.key} = buildInfoProperties.getProperty("${v.key}");""" + else { + val propValue = s"""buildInfoProperties.getProperty("${v.key}")""" + s"""${commentStr(v)}public static final java.lang.String ${v.key} = $propValue;""" + } } .mkString("\n\n ") @@ -172,7 +173,7 @@ object BuildInfo { |package ${buildInfoPackageName} | |object $buildInfoObjectName { - | private[this] val buildInfoProperties: java.util.Properties = new java.util.Properties() + | private val buildInfoProperties: java.util.Properties = new java.util.Properties() | | { | val buildInfoInputStream = getClass diff --git a/contrib/buildinfo/test/src/mill/contrib/buildinfo/BuildInfoTests.scala b/contrib/buildinfo/test/src/mill/contrib/buildinfo/BuildInfoTests.scala index 1c8ce5b4000..4af8f1b5189 100644 --- a/contrib/buildinfo/test/src/mill/contrib/buildinfo/BuildInfoTests.scala +++ b/contrib/buildinfo/test/src/mill/contrib/buildinfo/BuildInfoTests.scala @@ -86,7 +86,7 @@ object BuildInfoTests extends 
TestSuite { ) } - val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "buildinfo" + val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "buildinfo" def buildInfoSourcePath(eval: UnitTester) = eval.outPath / "buildInfoSources.dest/foo/BuildInfo.scala" @@ -158,7 +158,7 @@ object BuildInfoTests extends TestSuite { test("run") - UnitTester(BuildInfoPlain, testModuleSourcesPath / "scala").scoped { eval => val runResult = eval.outPath / "hello-mill" val Right(_) = - eval.apply(BuildInfoPlain.run(T.task(Args(runResult.toString)))) + eval.apply(BuildInfoPlain.run(Task.Anon(Args(runResult.toString)))) assert( os.exists(runResult), @@ -178,7 +178,7 @@ object BuildInfoTests extends TestSuite { val runResult = eval.outPath / "hello-mill" val Right(_) = - eval.apply(BuildInfoStatic.run(T.task(Args(runResult.toString)))) + eval.apply(BuildInfoStatic.run(Task.Anon(Args(runResult.toString)))) assert(os.exists(buildInfoSourcePath(eval))) assert(!os.exists(buildInfoResourcePath(eval))) @@ -189,7 +189,7 @@ object BuildInfoTests extends TestSuite { test("java") - UnitTester(BuildInfoJava, testModuleSourcesPath / "java").scoped { eval => val runResult = eval.outPath / "hello-mill" val Right(_) = - eval.apply(BuildInfoJava.run(T.task(Args(runResult.toString)))) + eval.apply(BuildInfoJava.run(Task.Anon(Args(runResult.toString)))) assert( os.exists(runResult), @@ -202,7 +202,7 @@ object BuildInfoTests extends TestSuite { val runResult = eval.outPath / "hello-mill" val generatedSrc = eval.outPath / "buildInfoSources.dest/foo/BuildInfo.java" val Right(_) = - eval.apply(BuildInfoJavaStatic.run(T.task(Args(runResult.toString)))) + eval.apply(BuildInfoJavaStatic.run(Task.Anon(Args(runResult.toString)))) assert( os.exists(runResult), diff --git a/contrib/checkstyle/readme.adoc b/contrib/checkstyle/readme.adoc deleted file mode 100644 index 24c2f7743b6..00000000000 --- a/contrib/checkstyle/readme.adoc +++ /dev/null @@ -1,128 +0,0 @@ -= 
Checkstyle -:page-aliases: Plugin_Checkstyle.adoc - -Performs quality checks on Java source files using https://checkstyle.org[Checkstyle] and generates reports from these checks. - -== CheckstyleModule - -To use this plugin in a Java/Scala module, - -1. Extend `mill.contrib.checkstyle.CheckstyleModule`. -2. Define a https://checkstyle.org/config.html[configuration] file `checkstyle-config.xml`. -3. Run the `checkstyle` command. - -=== checkstyle - -- flags -[source,sh] ----- - -// if an exception should be raised when violations are found -checkstyle --check - -// if Checkstyle output report should be written to System.out -checkstyle --stdout ----- - -- sources (optional) -[source,sh] ----- -// incorrect paths will cause a command failure -// checkstyle a/b - -// you can specify paths relative to millSourcePath -checkstyle src/a/b - -// process a single file -checkstyle src/a/B.java - -// process multiple sources -checkstyle src/a/b src/c/d src/e/F.java - -// process with flags -checkstyle --check --stdout src/a/b src/c/d - -// process all module sources -checkstyle ----- - -=== Shared configuration - -To share `checkstyle-config.xml` across modules, adapt the following example. -[source,scala] ----- -import mill._ -import mill.contrib.checkstyle.CheckstyleModule -import mill.scalalib._ - -object foo extends Module { - - object bar extends MyModule - object baz extends Module { - object fizz extends MyModule - object buzz extends MyModule - } - - trait MyModule extends JavaModule with CheckstyleModule { - - override def checkstyleConfig = T { - api.PathRef(T.workspace / "checkstyle-config.xml") - } - } -} ----- - - -=== Limitations - -- Version `6.3` or above is required for `plain` and `xml` formats. -- Setting `checkstyleOptions` might cause failures with legacy versions. 
- -== CheckstyleXsltModule - -This plugin extends the `mill.contrib.checkstyle.CheckstyleModule` with the ability to generate reports by applying https://www.w3.org/TR/xslt/[XSL Transformations] on a Checkstyle output report. - -=== Auto detect XSL Transformations - -XSLT files are detected automatically provided a prescribed directory structure is followed. -[source,scala] ----- -/** - * checkstyle-xslt - * ├─ html - * │ ├─ xslt0.xml - * │ └─ xslt1.xml - * └─ pdf - * ├─ xslt1.xml - * └─ xslt2.xml - * - * html/xslt0.xml -> xslt0.html - * html/xslt1.xml -> xslt1.html - * pdf/xslt1.xml -> xslt1.pdf - * pdf/xslt2.xml -> xslt2.pdf - */ ----- - -=== Specify XSL Transformations manually - -For a custom setup, adapt the following example. -[source,scala] ----- -import mill._ -import mill.api.PathRef -import mill.contrib.checkstyle.CheckstyleXsltModule -import mill.contrib.checkstyle.CheckstyleXsltReport -import mill.scalalib._ - -object foo extends JavaModule with CheckstyleXsltModule { - - override def checkstyleXsltReports = T { - Set( - CheckstyleXsltReport( - PathRef(millSourcePath / "checkstyle-no-frames.xml"), - PathRef(T.dest / "checkstyle-no-frames.html"), - ) - ) - } -} ----- diff --git a/contrib/checkstyle/src/mill/contrib/checkstyle/CheckstyleXsltModule.scala b/contrib/checkstyle/src/mill/contrib/checkstyle/CheckstyleXsltModule.scala deleted file mode 100644 index 8a0b7021d2f..00000000000 --- a/contrib/checkstyle/src/mill/contrib/checkstyle/CheckstyleXsltModule.scala +++ /dev/null @@ -1,103 +0,0 @@ -package mill -package contrib.checkstyle - -import javax.xml.transform.TransformerFactory -import javax.xml.transform.stream.{StreamResult, StreamSource} - -/** - * Extends [[CheckstyleModule]] with the ability to generate [[CheckstyleXsltReport]]s. - */ -trait CheckstyleXsltModule extends CheckstyleModule { - - /** - * Runs [[CheckstyleModule.checkstyle]] and uses [[CheckstyleModule.checkstyleOutput]] to generate [[checkstyleXsltReports]]. 
- */ - override def checkstyle(@mainargs.arg checkstyleArgs: CheckstyleArgs): Command[Int] = T.command { - val (output, exitCode) = checkstyle0(false, checkstyleArgs.sources)() - - val checkOutput = checkstyleOutput().path - - if (os.exists(checkOutput)) { - checkstyleXsltReports().foreach { - case CheckstyleXsltReport(xslt, output) => - val xsltSource = new StreamSource(xslt.path.getInputStream) - xsltSource.setSystemId(xslt.path.toIO) // so that relative URI references can be resolved - - val checkSource = - new StreamSource(checkOutput.getInputStream) - - val outputResult = - new StreamResult(os.write.outputStream(output.path, createFolders = true)) - - T.log.info(s"transforming checkstyle output report with $xslt") - - TransformerFactory.newInstance() - .newTransformer(xsltSource) - .transform(checkSource, outputResult) - - T.log.info(s"transformed output report at $output") - } - } - - checkstyleHandleErrors(checkstyleArgs.stdout, checkstyleArgs.check, exitCode, output) - } - - /** - * Necessary in order to allow XSLT transformations on the results - */ - final override def checkstyleFormat: T[String] = "xml" - - /** - * Folder containing the XSLT transformations. Defaults to `checkstyle-xslt` - * in the workspace root, but can be overriden on a per-module basis - */ - def checkstyleXsltfFolder = T.source(T.workspace / "checkstyle-xslt") - - /** - * Set of [[CheckstyleXsltReport]]s. 
- * - * The default implementation maps XSLT files, under `checkstyle-xslt`, as depicted below: - * {{{ - * - * checkstyle-xslt - * ├─ html - * │ ├─ xslt0.xml - * │ └─ xslt1.xml - * └─ pdf - * ├─ xslt1.xml - * └─ xslt2.xml - * - * html/xslt0.xml -> xslt0.html - * html/xslt1.xml -> xslt1.html - * pdf/xslt1.xml -> xslt1.pdf - * pdf/xslt2.xml -> xslt2.pdf - * - * }}} - */ - def checkstyleXsltReports: T[Set[CheckstyleXsltReport]] = T { - val dir = checkstyleXsltfFolder().path - - if (os.exists(dir)) { - val dest = T.dest - os.list(dir) - .iterator - .filter(os.isDir) - .flatMap(childDir => - os.list(childDir) - .iterator - .filter(os.isFile) - .filter(_.ext == "xml") - .map(xslt => - CheckstyleXsltReport( - PathRef(xslt), - PathRef(dest / s"${xslt.baseName}.${childDir.last}") - ) - ) - ) - .toSet - } else { - T.log.info(s"expected XSLT files under $dir") - Set.empty[CheckstyleXsltReport] - } - } -} diff --git a/contrib/codeartifact/src/mill/contrib/codeartifact/CodeartifactPublishModule.scala b/contrib/codeartifact/src/mill/contrib/codeartifact/CodeartifactPublishModule.scala index 6a16297562f..ff1b4d24739 100644 --- a/contrib/codeartifact/src/mill/contrib/codeartifact/CodeartifactPublishModule.scala +++ b/contrib/codeartifact/src/mill/contrib/codeartifact/CodeartifactPublishModule.scala @@ -17,7 +17,7 @@ trait CodeartifactPublishModule extends PublishModule { readTimeout: Int = 60000, connectTimeout: Int = 5000 ): define.Command[Unit] = - T.command { + Task.Command { val PublishModule.PublishData(artifactInfo, artifacts) = publishArtifacts() @@ -41,7 +41,7 @@ object CodeartifactPublishModule extends ExternalModule { readTimeout: Int = 60000, connectTimeout: Int = 5000 ) = - T.command { + Task.Command { val artifacts = T.sequence(publishArtifacts.value)().map { case data @ PublishModule.PublishData(_, _) => data.withConcretePath } diff --git a/contrib/docker/src/mill/contrib/docker/DockerModule.scala b/contrib/docker/src/mill/contrib/docker/DockerModule.scala index 
ac982a41726..2ab63858cef 100644 --- a/contrib/docker/src/mill/contrib/docker/DockerModule.scala +++ b/contrib/docker/src/mill/contrib/docker/DockerModule.scala @@ -98,7 +98,7 @@ trait DockerModule { outer: JavaModule => */ def executable: T[String] = "docker" - def dockerfile: T[String] = T { + def dockerfile: T[String] = Task { val jarName = assembly().path.last val labelRhs = labels() .map { case (k, v) => @@ -137,7 +137,7 @@ trait DockerModule { outer: JavaModule => |ENTRYPOINT [$quotedEntryPointArgs]""".stripMargin } - private def pullAndHash = T.input { + private def pullAndHash = Task.Input { def imageHash() = os.proc(executable(), "images", "--no-trunc", "--quiet", baseImage()) .call(stderr = os.Inherit).out.text().trim @@ -149,7 +149,7 @@ trait DockerModule { outer: JavaModule => (pullBaseImage(), imageHash()) } - final def build = T { + final def build = Task { val dest = T.dest val asmPath = outer.assembly().path @@ -187,7 +187,7 @@ trait DockerModule { outer: JavaModule => tags() } - final def push() = T.command { + final def push() = Task.Command { val tags = build() tags.foreach(t => os.proc(executable(), "push", t).call(stdout = os.Inherit, stderr = os.Inherit) diff --git a/contrib/docker/test/src/mill/contrib/docker/DockerModuleTest.scala b/contrib/docker/test/src/mill/contrib/docker/DockerModuleTest.scala index 443c0b625f1..7b2807a7b78 100644 --- a/contrib/docker/test/src/mill/contrib/docker/DockerModuleTest.scala +++ b/contrib/docker/test/src/mill/contrib/docker/DockerModuleTest.scala @@ -45,9 +45,9 @@ object DockerModuleTest extends TestSuite { val testArtifactName = "mill-docker-contrib-test" - val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "docker" + val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "docker" - val multineRegex = "\\R+".r + val multilineRegex = "\\R+".r private def isInstalled(executable: String): Boolean = { val getPathCmd = if (scala.util.Properties.isWin) "where" 
else "which" @@ -91,14 +91,14 @@ object DockerModuleTest extends TestSuite { test("dockerfile contents") { test("default options") - UnitTester(Docker, null).scoped { eval => val Right(result) = eval(Docker.dockerDefault.dockerfile) - val expected = multineRegex.replaceAllIn( + val expected = multilineRegex.replaceAllIn( """ |FROM gcr.io/distroless/java:latest |COPY out.jar /out.jar |ENTRYPOINT ["java", "-jar", "/out.jar"]""".stripMargin, sys.props.getOrElse("line.separator", ???) ) - val dockerfileStringRefined = multineRegex.replaceAllIn( + val dockerfileStringRefined = multilineRegex.replaceAllIn( result.value, sys.props.getOrElse("line.separator", ???) ) @@ -107,7 +107,7 @@ object DockerModuleTest extends TestSuite { test("all options") - UnitTester(Docker, null).scoped { eval => val Right(result) = eval(Docker.dockerAll.dockerfile) - val expected = multineRegex.replaceAllIn( + val expected = multilineRegex.replaceAllIn( """ |FROM docker.io/openjdk:11 |LABEL "version"="1.0" @@ -123,7 +123,7 @@ object DockerModuleTest extends TestSuite { |ENTRYPOINT ["java", "-jar", "/out.jar"]""".stripMargin, sys.props.getOrElse("line.separator", ???) ) - val dockerfileStringRefined = multineRegex.replaceAllIn( + val dockerfileStringRefined = multilineRegex.replaceAllIn( result.value, sys.props.getOrElse("line.separator", ???) ) @@ -132,14 +132,14 @@ object DockerModuleTest extends TestSuite { test("extra jvm options") - UnitTester(Docker, null).scoped { eval => val Right(result) = eval(Docker.dockerJvmOptions.dockerfile) - val expected = multineRegex.replaceAllIn( + val expected = multilineRegex.replaceAllIn( """ |FROM gcr.io/distroless/java:latest |COPY out.jar /out.jar |ENTRYPOINT ["java", "-Xmx1024M", "-jar", "/out.jar"]""".stripMargin, sys.props.getOrElse("line.separator", ???) ) - val dockerfileStringRefined = multineRegex.replaceAllIn( + val dockerfileStringRefined = multilineRegex.replaceAllIn( result.value, sys.props.getOrElse("line.separator", ???) 
) diff --git a/contrib/errorprone/readme.adoc b/contrib/errorprone/readme.adoc deleted file mode 100644 index 159b045051a..00000000000 --- a/contrib/errorprone/readme.adoc +++ /dev/null @@ -1,31 +0,0 @@ -= Mill ErrorProne Plugin -:page-aliases: Plugin_ErrorProne.adoc - -https://errorprone.info/index[Error Prone] augments the Java compiler's type checker and detect common mistakes at compile time. - -You just need to mix the `ErrorProneModule` into your `JavaModule` and it will automatically run with every compilation. - -.`build.mill.scala`: Enable `ErrorProne` in a module -[source,scala] ----- -package build -import mill._, scalalib._ - -import $ivy.`com.lihaoyi::mill-contrib-errorprone:` -import mill.contrib.errorprone.ErrorProneModule - -object foo extends JavaModule with ErrorProneModule { -} ----- - -== Configuration - -The following configuration options exist: - -`def errorProneVersion: T[String]`:: -The `error-prone` version to use. Defaults to [[BuildInfo.errorProneVersion]], the version used to build and test the module. -Find the latest at https://mvnrepository.com/artifact/com.google.errorprone/error_prone_core[mvnrepository.com] - -`def errorProneOptions: T[Seq[String]]`:: - Options directly given to the `error-prone` processor. -Those are documented as "flags" at https://errorprone.info/docs/flags diff --git a/contrib/flyway/readme.adoc b/contrib/flyway/readme.adoc index 9a68fc56af8..16dec14d4e5 100644 --- a/contrib/flyway/readme.adoc +++ b/contrib/flyway/readme.adoc @@ -41,4 +41,4 @@ mill foo.flywayMigrate CAUTION: You should never hard-code credentials or check them into a version control system. You should write some code to populate the settings for flyway instead. 
-For example `def flywayPassword = T.input(T.ctx.env("FLYWAY_PASSWORD"))` +For example `def flywayPassword = Task.Input(T.ctx.env("FLYWAY_PASSWORD"))` diff --git a/contrib/flyway/src/mill/contrib/flyway/FlywayModule.scala b/contrib/flyway/src/mill/contrib/flyway/FlywayModule.scala index a7c6f7a7e57..2b8c8c102e9 100644 --- a/contrib/flyway/src/mill/contrib/flyway/FlywayModule.scala +++ b/contrib/flyway/src/mill/contrib/flyway/FlywayModule.scala @@ -10,7 +10,7 @@ import org.flywaydb.core.internal.configuration.{ConfigUtils => flyway} import org.flywaydb.core.internal.info.MigrationInfoDumper import scala.jdk.CollectionConverters._ -import mill.{Agg, T} +import mill.{Agg, T, Task} import mill.api.PathRef import mill.define.Command import mill.scalalib.{Dep, JavaModule} @@ -22,13 +22,13 @@ trait FlywayModule extends JavaModule { def flywayUrl: T[String] def flywayUser: T[String] = T("") def flywayPassword: T[String] = T("") - def flywayFileLocations: T[Seq[PathRef]] = T { + def flywayFileLocations: T[Seq[PathRef]] = Task { resources().map(pr => PathRef(pr.path / "db/migration", pr.quick)) } def flywayDriverDeps: T[Agg[Dep]] - def jdbcClasspath = T { + def jdbcClasspath = Task { defaultResolver().resolveDeps(flywayDriverDeps()) } @@ -37,7 +37,7 @@ trait FlywayModule extends JavaModule { .filter(_.nonEmpty) .map(key -> _) - def flywayInstance = T.worker { + def flywayInstance = Task.Worker { val jdbcClassloader = new URLClassLoader(jdbcClasspath().map(_.path.toIO.toURI.toURL).toArray) val configProps = Map(flyway.URL -> flywayUrl()) ++ @@ -53,19 +53,19 @@ trait FlywayModule extends JavaModule { .load } - def flywayMigrate(): Command[MigrateResult] = T.command { + def flywayMigrate(): Command[MigrateResult] = Task.Command { flywayInstance().migrate() } - def flywayClean(): Command[CleanResult] = T.command { + def flywayClean(): Command[CleanResult] = Task.Command { flywayInstance().clean() } - def flywayBaseline(): Command[BaselineResult] = T.command { + def 
flywayBaseline(): Command[BaselineResult] = Task.Command { flywayInstance().baseline() } - def flywayInfo(): Command[String] = T.command { + def flywayInfo(): Command[String] = Task.Command { val info = flywayInstance().info val current = info.current val currentSchemaVersion = diff --git a/contrib/flyway/test/src/mill/contrib/flyway/BuildTest.scala b/contrib/flyway/test/src/mill/contrib/flyway/BuildTest.scala index 0bb235db9ff..247fe934437 100644 --- a/contrib/flyway/test/src/mill/contrib/flyway/BuildTest.scala +++ b/contrib/flyway/test/src/mill/contrib/flyway/BuildTest.scala @@ -10,8 +10,8 @@ object BuildTest extends TestSuite { object Build extends TestBaseModule { object build extends FlywayModule { - val resourceFolder = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) - override def resources = T.sources(resourceFolder) + val resourceFolder = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) + override def resources = Task.Sources(resourceFolder) def h2 = ivy"com.h2database:h2:2.1.214" diff --git a/contrib/gitlab/readme.adoc b/contrib/gitlab/readme.adoc index 387e3861833..f245c0dc0fa 100644 --- a/contrib/gitlab/readme.adoc +++ b/contrib/gitlab/readme.adoc @@ -33,7 +33,7 @@ object lib extends ScalaModule with GitlabPublishModule { `publishVersion` and `pomSettings` come from `PublishModule`. `GitlabPublishModule` requires you to -set `publishRepository` for target of artifact publishing. Note that this *must* be a +set `publishRepository` for task of artifact publishing. Note that this *must* be a project repository defined by project id (publishing to other type of repositories is not https://docs.gitlab.com/ee/user/packages/maven_repository/#use-the-gitlab-endpoint-for-maven-packages[supported]). 
@@ -129,7 +129,7 @@ import mill.api.Result.Success override def gitlabHeaders( props: Map[String, String] // System properties - ): Task[GitlabAuthHeaders] = T.task { + ): Task[GitlabAuthHeaders] = Task.Anon { // This uses default lookup and ads custom headers val access = tokenLookup.resolveGitlabToken(T.env, props, T.workspace) val accessHeader = access.fold(_ => Seq.empty[(String, String)], _.headers) @@ -164,7 +164,7 @@ import $ivy.`com.lihaoyi::mill-contrib-gitlab:` import mill.contrib.gitlab._ // DON'T DO THIS -def repositoriesTask = T.task { +def repositoriesTask = Task.Anon { super.repositoriesTask() ++ Seq( MavenRepository("https://gitlab.local/api/v4/projects/42/packages/maven", Some(Authentication(Seq(("Private-Token", "<>")))))) @@ -186,7 +186,7 @@ object myPackageRepository extends GitlabMavenRepository { } object myModule extends ScalaModule { - def repositoriesTask = T.task { + def repositoriesTask = Task.Anon { super.repositoriesTask() ++ Seq( MavenRepository("https://oss.sonatype.org/content/repositories/releases"), diff --git a/contrib/gitlab/src/mill/contrib/gitlab/GitlabMavenRepository.scala b/contrib/gitlab/src/mill/contrib/gitlab/GitlabMavenRepository.scala index 8ac1907a4f1..786949f72ec 100644 --- a/contrib/gitlab/src/mill/contrib/gitlab/GitlabMavenRepository.scala +++ b/contrib/gitlab/src/mill/contrib/gitlab/GitlabMavenRepository.scala @@ -12,7 +12,7 @@ trait GitlabMavenRepository { def tokenLookup: GitlabTokenLookup = new GitlabTokenLookup {} // For token discovery def gitlabRepository: GitlabPackageRepository // For package discovery - def mavenRepository: Task[MavenRepository] = T.task { + def mavenRepository: Task[MavenRepository] = Task.Anon { val gitlabAuth = tokenLookup.resolveGitlabToken(T.env, sys.props.toMap, T.workspace) .map(auth => Authentication(auth.headers)) diff --git a/contrib/gitlab/src/mill/contrib/gitlab/GitlabPublishModule.scala b/contrib/gitlab/src/mill/contrib/gitlab/GitlabPublishModule.scala index 
5dac6430268..fd72f1b6d2a 100644 --- a/contrib/gitlab/src/mill/contrib/gitlab/GitlabPublishModule.scala +++ b/contrib/gitlab/src/mill/contrib/gitlab/GitlabPublishModule.scala @@ -16,7 +16,7 @@ trait GitlabPublishModule extends PublishModule { outer => def gitlabHeaders( systemProps: Map[String, String] = sys.props.toMap - ): Task[GitlabAuthHeaders] = T.task { + ): Task[GitlabAuthHeaders] = Task.Anon { val auth = tokenLookup.resolveGitlabToken(T.env, systemProps, T.workspace) auth match { case Left(msg) => @@ -30,7 +30,7 @@ trait GitlabPublishModule extends PublishModule { outer => def publishGitlab( readTimeout: Int = 60000, connectTimeout: Int = 5000 - ): define.Command[Unit] = T.command { + ): define.Command[Unit] = Task.Command { val gitlabRepo = publishRepository @@ -58,7 +58,7 @@ object GitlabPublishModule extends ExternalModule { publishArtifacts: mill.main.Tasks[PublishModule.PublishData], readTimeout: Int = 60000, connectTimeout: Int = 5000 - ): Command[Unit] = T.command { + ): Command[Unit] = Task.Command { val repo = ProjectRepository(gitlabRoot, projectId) val auth = GitlabAuthHeaders.privateToken(personalToken) diff --git a/contrib/gitlab/src/mill/contrib/gitlab/GitlabTokenLookup.scala b/contrib/gitlab/src/mill/contrib/gitlab/GitlabTokenLookup.scala index e59f9d97dd2..37ce9d334b8 100644 --- a/contrib/gitlab/src/mill/contrib/gitlab/GitlabTokenLookup.scala +++ b/contrib/gitlab/src/mill/contrib/gitlab/GitlabTokenLookup.scala @@ -110,7 +110,7 @@ object GitlabTokenLookup { * - File =Contents of a file on local disk. * - Custom = Own function * - * Possible additions, that can now be supported with Custom: KeyVault, Yaml, etc.. + * Possible additions, that can now be supported with Custom: KeyVault, Yaml, etc. 
*/ sealed trait TokenSource case class Env(name: String) extends TokenSource diff --git a/contrib/gitlab/test/src/mill/contrib/gitlab/GitlabModuleTests.scala b/contrib/gitlab/test/src/mill/contrib/gitlab/GitlabModuleTests.scala index 5f0e16d98b2..f186651bb8d 100644 --- a/contrib/gitlab/test/src/mill/contrib/gitlab/GitlabModuleTests.scala +++ b/contrib/gitlab/test/src/mill/contrib/gitlab/GitlabModuleTests.scala @@ -1,6 +1,6 @@ package mill.contrib.gitlab -import mill.T +import mill.{T, Task} import mill.api.Result.Failure import mill.scalalib.publish.PomSettings import mill.testkit.UnitTester @@ -27,7 +27,7 @@ object GitlabModuleTests extends TestSuite { } // GitlabMavenRepository does not need to be a module, but it needs to be invoked from one. - // So for test purposes we make make a module with it to get a Ctx for evaluation + // So for test purposes we make a module with it to get a Ctx for evaluation object GLMvnRepo extends TestBaseModule with GitlabMavenRepository { override def gitlabRepository: GitlabPackageRepository = InstanceRepository("https://gl.local") diff --git a/contrib/jmh/src/mill/contrib/jmh/JmhModule.scala b/contrib/jmh/src/mill/contrib/jmh/JmhModule.scala index d950fe586c7..d32ee5e6b09 100644 --- a/contrib/jmh/src/mill/contrib/jmh/JmhModule.scala +++ b/contrib/jmh/src/mill/contrib/jmh/JmhModule.scala @@ -36,21 +36,22 @@ trait JmhModule extends JavaModule { def ivyDeps = super.ivyDeps() ++ Agg(ivy"org.openjdk.jmh:jmh-core:${jmhCoreVersion()}") def runJmh(args: String*) = - T.command { + Task.Command { val (_, resources) = generateBenchmarkSources() Jvm.runSubprocess( "org.openjdk.jmh.Main", classPath = (runClasspath() ++ generatorDeps()).map(_.path) ++ Seq(compileGeneratedSources().path, resources), mainArgs = args, - workingDir = T.ctx().dest + workingDir = T.ctx().dest, + javaHome = zincWorker().javaHome().map(_.path) ) } def listJmhBenchmarks(args: String*) = runJmh(("-l" +: args): _*) def compileGeneratedSources = - T { + Task { val dest = 
T.ctx().dest val (sourcesDir, _) = generateBenchmarkSources() val sources = os.walk(sourcesDir).filter(os.isFile) @@ -70,9 +71,9 @@ trait JmhModule extends JavaModule { // returns sources and resources directories def generateBenchmarkSources = - T { + Task { val dest = T.ctx().dest - + val forkedArgs = forkArgs().toSeq val sourcesDir = dest / "jmh_sources" val resourcesDir = dest / "jmh_resources" @@ -89,13 +90,15 @@ trait JmhModule extends JavaModule { sourcesDir.toString, resourcesDir.toString, "default" - ) + ), + javaHome = zincWorker().javaHome().map(_.path), + jvmArgs = forkedArgs ) (sourcesDir, resourcesDir) } - def generatorDeps = T { + def generatorDeps = Task { defaultResolver().resolveDeps( Agg(ivy"org.openjdk.jmh:jmh-generator-bytecode:${jmhGeneratorByteCodeVersion()}") ) diff --git a/contrib/jmh/test/src/mill/contrib/jmh/JmhModuleTest.scala b/contrib/jmh/test/src/mill/contrib/jmh/JmhModuleTest.scala index 64520975b98..75348c4d31b 100644 --- a/contrib/jmh/test/src/mill/contrib/jmh/JmhModuleTest.scala +++ b/contrib/jmh/test/src/mill/contrib/jmh/JmhModuleTest.scala @@ -16,7 +16,7 @@ object JmhModuleTest extends TestSuite { override def scalaVersion = sys.props.getOrElse("TEST_SCALA_2_13_VERSION", ???) 
override def jmhCoreVersion = "1.35" } - val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "jmh" + val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "jmh" def tests = Tests { test("jmh") { @@ -24,11 +24,12 @@ object JmhModuleTest extends TestSuite { val paths = EvaluatorPaths.resolveDestPaths(eval.outPath, jmh.listJmhBenchmarks()) val outFile = paths.dest / "benchmarks.out" val Right(result) = eval(jmh.listJmhBenchmarks("-o", outFile.toString)) - val expected = """Benchmarks: - |mill.contrib.jmh.Bench2.log - |mill.contrib.jmh.Bench2.sqrt - |mill.contrib.jmh.Bench1.measureShared - |mill.contrib.jmh.Bench1.measureUnshared""".stripMargin + val expected = + """Benchmarks: + |mill.contrib.jmh.Bench2.log + |mill.contrib.jmh.Bench2.sqrt + |mill.contrib.jmh.Bench1.measureShared + |mill.contrib.jmh.Bench1.measureUnshared""".stripMargin val out = os.read.lines(outFile).map(_.trim).mkString(System.lineSeparator()) assert(out == expected) } diff --git a/contrib/package.mill b/contrib/package.mill index 9dd33ac0bb7..d11cc3a0132 100644 --- a/contrib/package.mill +++ b/contrib/package.mill @@ -7,7 +7,6 @@ import de.tobiasroeser.mill.vcs.version.VcsVersion import com.goyeau.mill.scalafix.ScalafixModule import mill._ import mill.api.JarManifest -import mill.define.NamedTask import mill.main.Tasks import mill.scalalib._ import mill.scalalib.api.ZincWorkerUtil @@ -44,7 +43,7 @@ object `package` extends RootModule { // pure Java implementation def artifactSuffix: T[String] = "" - def scalaLibraryIvyDeps: T[Agg[Dep]] = T { Agg.empty[Dep] } + def scalaLibraryIvyDeps: T[Agg[Dep]] = Task { Agg.empty[Dep] } def ivyDeps = Agg(build.Deps.sbtTestInterface) def compileIvyDeps = Agg(build.Deps.testng) def runIvyDeps = Agg(build.Deps.testng) @@ -64,7 +63,7 @@ object `package` extends RootModule { def testTransitiveDeps = super.testTransitiveDeps() ++ T.traverse(build.Deps.play.keys.toSeq)(worker(_).testDep)() - def testArgs = T { + def 
testArgs = Task { super.testArgs() ++ Seq( s"-DTEST_PLAY_VERSION_2_6=${build.Deps.Play_2_6.playVersion}", @@ -111,15 +110,12 @@ object `package` extends RootModule { def compileModuleDeps = Seq(build.scalalib) def testTransitiveDeps = - super.testTransitiveDeps() ++ Seq(worker.testDep(), worker2.testDep()) + super.testTransitiveDeps() ++ Seq(worker2.testDep()) - def testArgs = T { + def testArgs = Task { super.testArgs() ++ Seq( - s"-DMILL_SCOVERAGE_VERSION=${build.Deps.scalacScoveragePlugin.dep.version}", s"-DMILL_SCOVERAGE2_VERSION=${build.Deps.scalacScoverage2Plugin.dep.version}", - s"-DTEST_SCALA_2_13_VERSION_FOR_SCOVERAGE_1=${build.Deps.scalaVersionForScoverageWorker1}", - s"-DTEST_SCALA_2_12_VERSION=2.12.15" // last supported 2.12 version for Scoverage 1.x ) } @@ -128,25 +124,12 @@ object `package` extends RootModule { super.testModuleDeps ++ Seq(build.scalalib, build.scalajslib, build.scalanativelib, build.contrib.buildinfo) - // Worker for Scoverage 1.x - object worker extends build.MillPublishScalaModule { - // scoverage is on an old Scala version which doesnt support scalafix - def fix(args: String*): Command[Unit] = T.command {} - def compileModuleDeps = Seq(build.main.api) - def moduleDeps = Seq(scoverage.api) - def testDepPaths = T { Seq(compile().classes) } - - // compile-time only, need to provide the correct scoverage version at runtime - def compileIvyDeps = Agg(build.Deps.scalacScoveragePlugin) - def scalaVersion = build.Deps.scalaVersionForScoverageWorker1 - } - // Worker for Scoverage 2.0 object worker2 extends build.MillPublishScalaModule { def compileModuleDeps = Seq(build.main.api) def moduleDeps = Seq(scoverage.api) - def testDepPaths = T { Seq(compile().classes) } - def compileIvyDeps = T { + def testDepPaths = Task { Seq(compile().classes) } + def compileIvyDeps = Task { Agg( // compile-time only, need to provide the correct scoverage version at runtime build.Deps.scalacScoverage2Plugin, @@ -160,7 +143,8 @@ object `package` extends 
RootModule { object buildinfo extends ContribModule { def compileModuleDeps = Seq(build.scalalib, build.scalajslib, build.scalanativelib) - def testModuleDeps = super.testModuleDeps ++ Seq(build.scalalib, build.scalajslib, build.scalanativelib) + def testModuleDeps = + super.testModuleDeps ++ Seq(build.scalalib, build.scalajslib, build.scalanativelib) } object proguard extends ContribModule { @@ -227,17 +211,4 @@ object `package` extends RootModule { def compileModuleDeps = Seq(build.scalalib) def testModuleDeps = super.testModuleDeps ++ Seq(build.scalalib) } - - object errorprone extends ContribModule with BuildInfo { - def compileModuleDeps = Seq(build.scalalib) - def testModuleDeps = super.testModuleDeps ++ Seq(build.scalalib) - def buildInfoPackageName = "mill.contrib.errorprone" - def buildInfoObjectName = "BuildInfo" - def buildInfoMembers = Seq(BuildInfo.Value("errorProneVersion", Deps.RuntimeDeps.errorProneCore.version)) - } - - object checkstyle extends ContribModule { - def compileModuleDeps = Seq(build.scalalib) - def testModuleDeps = super.testModuleDeps ++ Seq(build.scalalib) - } } diff --git a/contrib/playlib/api/src/mill/playlib/api/RouteCompilerType.java b/contrib/playlib/api/src/mill/playlib/api/RouteCompilerType.java index 35b98e46f62..8249f9ead73 100644 --- a/contrib/playlib/api/src/mill/playlib/api/RouteCompilerType.java +++ b/contrib/playlib/api/src/mill/playlib/api/RouteCompilerType.java @@ -1,6 +1,6 @@ package mill.playlib.api; public enum RouteCompilerType { - InjectedGenerator, - StaticGenerator + InjectedGenerator, + StaticGenerator } diff --git a/contrib/playlib/api/src/mill/playlib/api/RouteCompilerWorkerApi.java b/contrib/playlib/api/src/mill/playlib/api/RouteCompilerWorkerApi.java index 24a071e7f80..2fbc3a07b1f 100644 --- a/contrib/playlib/api/src/mill/playlib/api/RouteCompilerWorkerApi.java +++ b/contrib/playlib/api/src/mill/playlib/api/RouteCompilerWorkerApi.java @@ -1,13 +1,12 @@ package mill.playlib.api; public interface 
RouteCompilerWorkerApi { - String compile(java.io.File[] files, - String[] additionalImports, - boolean forwardsRouter, - boolean reverseRouter, - boolean namespaceReverseRouter, - RouteCompilerType generatorType, - java.io.File dest); - - + String compile( + java.io.File[] files, + String[] additionalImports, + boolean forwardsRouter, + boolean reverseRouter, + boolean namespaceReverseRouter, + RouteCompilerType generatorType, + java.io.File dest); } diff --git a/contrib/playlib/api/src/mill/playlib/api/Versions.java b/contrib/playlib/api/src/mill/playlib/api/Versions.java index 9c31b143eba..5b48972ec01 100644 --- a/contrib/playlib/api/src/mill/playlib/api/Versions.java +++ b/contrib/playlib/api/src/mill/playlib/api/Versions.java @@ -1,9 +1,9 @@ package mill.playlib.api; public class Versions { - public static final String PLAY_2_6 = "2.6"; - public static final String PLAY_2_7 = "2.7"; - public static final String PLAY_2_8 = "2.8"; - public static final String PLAY_2_9 = "2.9"; - public static final String PLAY_3_0 = "3.0"; + public static final String PLAY_2_6 = "2.6"; + public static final String PLAY_2_7 = "2.7"; + public static final String PLAY_2_8 = "2.8"; + public static final String PLAY_2_9 = "2.9"; + public static final String PLAY_3_0 = "3.0"; } diff --git a/contrib/playlib/readme.adoc b/contrib/playlib/readme.adoc index 39a6c3648f7..9070e9cd6dc 100644 --- a/contrib/playlib/readme.adoc +++ b/contrib/playlib/readme.adoc @@ -44,8 +44,8 @@ import $ivy.`com.lihaoyi::mill-contrib-playlib:`, mill.playlib._ object core extends PlayModule { // config - override def scalaVersion = T { "2.13.12" } - override def playVersion = T { "2.8.20" } + override def scalaVersion = Task { "2.13.12" } + override def playVersion = Task { "2.8.20" } object test extends PlayTests } @@ -119,8 +119,8 @@ import $ivy.`com.lihaoyi::mill-contrib-playlib:`, mill.playlib._ object core extends PlayApiModule { // config - override def scalaVersion = T { "2.13.12" } - override def 
playVersion = T { "2.8.20" } + override def scalaVersion = Task { "2.13.12" } + override def playVersion = Task { "2.8.20" } object test extends PlayTests } @@ -128,8 +128,8 @@ object core extends PlayApiModule { == Play configuration options -The Play modules themselves don't have specific configuration options at this point but the <> and the <<_twirl_configuration_options>> are applicable. +The Play modules themselves don't have specific configuration options at this point but the <<_router_configuration_options,router +module configuration options>> and the <> are applicable. == Additional play libraries @@ -157,18 +157,18 @@ import $ivy.`com.lihaoyi::mill-contrib-playlib:$MILL_VERSION`, mill.playlib._ object core extends PlayApiModule { // config - override def scalaVersion = T{ "2.13.12" } - override def playVersion = T{ "2.8.20" } + override def scalaVersion = Task { "2.13.12" } + override def playVersion = Task { "2.8.20" } object test extends PlayTests - override def ivyDeps = T{ super.ivyDeps() ++ Agg(ws(), filters()) } + override def ivyDeps = Task { super.ivyDeps() ++ Agg(ws(), filters()) } } ---- == Commands equivalence -Mill commands are targets on a named build. For example if your build is called `core`: +Mill commands are tasks on a named build. For example if your build is called `core`: * compile: `core.compile` * run: _NOT Implemented yet_. 
It can be approximated with `mill -w core.runBackground` but this @@ -199,8 +199,8 @@ import $ivy.`com.lihaoyi::mill-contrib-playlib:`, mill.playlib._ object core extends PlayModule { // config - override def scalaVersion = T { "2.13.12" } - override def playVersion = T { "2.8.20" } + override def scalaVersion = Task { "2.13.12" } + override def playVersion = Task { "2.8.20" } object test extends PlayTests } @@ -240,8 +240,8 @@ import $ivy.`com.lihaoyi::mill-contrib-playlib:`, mill.playlib._ object build extends RootModule with PlayModule { // config - override def scalaVersion = T { "2.13.12" } - override def playVersion = T { "2.8.20" } + override def scalaVersion = Task { "2.13.12" } + override def playVersion = Task { "2.8.20" } object test extends PlayTests } @@ -283,8 +283,8 @@ import mill._ import $ivy.`com.lihaoyi::mill-contrib-playlib:`, mill.playlib._ object app extends ScalaModule with RouterModule { - def playVersion = T{ "2.8.20" } - def scalaVersion = T { "2.13.12" } + def playVersion = Task { "2.8.20" } + def scalaVersion = Task { "2.13.12" } } ---- diff --git a/contrib/playlib/src/mill/playlib/Dependencies.scala b/contrib/playlib/src/mill/playlib/Dependencies.scala index a91503498ff..096710190e5 100644 --- a/contrib/playlib/src/mill/playlib/Dependencies.scala +++ b/contrib/playlib/src/mill/playlib/Dependencies.scala @@ -1,20 +1,20 @@ package mill.playlib -import mill.{Agg, T} +import mill.{Agg, Task} import mill.scalalib._ private[playlib] trait Dependencies extends ScalaModule with Version { - def core = T { component("play") } - def guice = T { component("play-guice") } - def server = T { component("play-server") } - def logback = T { component("play-logback") } - def evolutions = T { component("play-jdbc-evolutions") } - def jdbc = T { component("play-jdbc") } - def filters = T { component("filters-helpers") } - def ws = T { component("play-ahc-ws") } - def caffeine = T { component("play-caffeine-cache") } + def core = Task { component("play") } 
+ def guice = Task { component("play-guice") } + def server = Task { component("play-server") } + def logback = Task { component("play-logback") } + def evolutions = Task { component("play-jdbc-evolutions") } + def jdbc = Task { component("play-jdbc") } + def filters = Task { component("filters-helpers") } + def ws = Task { component("play-ahc-ws") } + def caffeine = Task { component("play-caffeine-cache") } - override def ivyDeps = T { + override def ivyDeps = Task { super.ivyDeps() ++ Agg( core(), guice(), diff --git a/contrib/playlib/src/mill/playlib/Layout.scala b/contrib/playlib/src/mill/playlib/Layout.scala index 54a7a08b07e..d5492e33160 100644 --- a/contrib/playlib/src/mill/playlib/Layout.scala +++ b/contrib/playlib/src/mill/playlib/Layout.scala @@ -1,13 +1,13 @@ package mill.playlib -import mill.T +import mill.Task import mill.scalalib._ private[playlib] trait Layout extends JavaModule { - def conf = T.sources { millSourcePath / "conf" } - def app = T.sources { millSourcePath / "app" } + def conf = Task.Sources { millSourcePath / "conf" } + def app = Task.Sources { millSourcePath / "app" } - override def sources = T.sources { app() } - override def resources = T.sources { conf() } + override def sources = Task { app() } + override def resources = Task { conf() } } diff --git a/contrib/playlib/src/mill/playlib/PlayModule.scala b/contrib/playlib/src/mill/playlib/PlayModule.scala index bd68aed7c6d..591ec8c8be3 100644 --- a/contrib/playlib/src/mill/playlib/PlayModule.scala +++ b/contrib/playlib/src/mill/playlib/PlayModule.scala @@ -4,10 +4,12 @@ import mill.define.Task import mill.playlib.api.Versions import mill.scalalib._ import mill.{Agg, Args, T} +import mill.api.PathRef +import mill.define.Target trait PlayApiModule extends Dependencies with Router with Server { trait PlayTests extends ScalaTests with TestModule.ScalaTest { - override def ivyDeps = T { + override def ivyDeps = Task { val scalatestPlusPlayVersion = playMinorVersion() match { case 
Versions.PLAY_2_6 => "3.1.3" case Versions.PLAY_2_7 => "4.0.3" @@ -17,14 +19,14 @@ trait PlayApiModule extends Dependencies with Router with Server { } Agg(ivy"org.scalatestplus.play::scalatestplus-play::${scalatestPlusPlayVersion}") } - override def sources = T.sources { millSourcePath } + override def sources: Target[Seq[PathRef]] = Task.Sources { millSourcePath } } - def start(args: Task[Args] = T.task(Args())) = T.command { run(args)() } + def start(args: Task[Args] = Task.Anon(Args())) = Task.Command { run(args)() } } trait PlayModule extends PlayApiModule with Static with Twirl { - override def twirlVersion = T { + override def twirlVersion: T[String] = Task { playMinorVersion() match { case "2.6" => "1.3.16" case "2.7" => "1.4.2" diff --git a/contrib/playlib/src/mill/playlib/RouteCompilerWorkerModule.scala b/contrib/playlib/src/mill/playlib/RouteCompilerWorkerModule.scala index 854be532ff0..412284f1c85 100644 --- a/contrib/playlib/src/mill/playlib/RouteCompilerWorkerModule.scala +++ b/contrib/playlib/src/mill/playlib/RouteCompilerWorkerModule.scala @@ -1,10 +1,10 @@ package mill.playlib -import mill.{Module, T} +import mill.{Module, Task} import mill.define.{Discover, ExternalModule, Worker} trait RouteCompilerWorkerModule extends Module { - def routeCompilerWorker: Worker[RouteCompilerWorker] = T.worker { + def routeCompilerWorker: Worker[RouteCompilerWorker] = Task.Worker { new RouteCompilerWorker() } } diff --git a/contrib/playlib/src/mill/playlib/Router.scala b/contrib/playlib/src/mill/playlib/Router.scala index 411bdb39fb7..32b75933dc5 100644 --- a/contrib/playlib/src/mill/playlib/Router.scala +++ b/contrib/playlib/src/mill/playlib/Router.scala @@ -1,7 +1,7 @@ package mill.playlib -import mill.T +import mill.Task private[playlib] trait Router extends RouterModule with Layout { - override def routes = T { conf() } + override def routes = Task { conf() } } diff --git a/contrib/playlib/src/mill/playlib/RouterModule.scala 
b/contrib/playlib/src/mill/playlib/RouterModule.scala index a89e21ef453..11b921257b5 100644 --- a/contrib/playlib/src/mill/playlib/RouterModule.scala +++ b/contrib/playlib/src/mill/playlib/RouterModule.scala @@ -5,13 +5,13 @@ import mill.util.Util.millProjectModule import mill.playlib.api.RouteCompilerType import mill.scalalib._ import mill.scalalib.api._ -import mill.{Agg, T} +import mill.{Agg, T, Task} trait RouterModule extends ScalaModule with Version { - def routes: T[Seq[PathRef]] = T.sources { millSourcePath / "routes" } + def routes: T[Seq[PathRef]] = Task.Sources { millSourcePath / "routes" } - def routeFiles = T { + def routeFiles = Task { val paths = routes().flatMap(file => os.walk(file.path)) val routeFiles = paths.filter(_.ext == "routes") ++ paths.filter(_.last == "routes") routeFiles.map(f => PathRef(f)) @@ -45,7 +45,7 @@ trait RouterModule extends ScalaModule with Version { */ def generatorType: RouteCompilerType = RouteCompilerType.InjectedGenerator - def routerClasspath: T[Agg[PathRef]] = T { + def routerClasspath: T[Agg[PathRef]] = Task { defaultResolver().resolveDeps( playMinorVersion() match { case "2.6" | "2.7" | "2.8" => @@ -60,7 +60,7 @@ trait RouterModule extends ScalaModule with Version { protected val routeCompilerWorker: RouteCompilerWorkerModule = RouteCompilerWorkerModule - def compileRouter: T[CompilationResult] = T.persistent { + def compileRouter: T[CompilationResult] = Task(persistent = true) { T.log.debug(s"compiling play routes with ${playVersion()} worker") routeCompilerWorker.routeCompilerWorker().compile( routerClasspath = playRouterToolsClasspath(), @@ -74,26 +74,27 @@ trait RouterModule extends ScalaModule with Version { ) } - def playRouteCompilerWorkerClasspath = T { + def playRouteCompilerWorkerClasspath = Task { millProjectModule( s"mill-contrib-playlib-worker-${playMinorVersion()}", repositoriesTask(), artifactSuffix = playMinorVersion() match { case "2.6" => "_2.12" + case "2.7" | "2.8" => "_2.13" case _ => "_2.13" } 
) } - def playRouterToolsClasspath = T { + def playRouterToolsClasspath = Task { playRouteCompilerWorkerClasspath() ++ routerClasspath() } - def routerClasses = T { + def routerClasses = Task { Seq(compileRouter().classes) } - override def generatedSources = T { + override def generatedSources = Task { super.generatedSources() ++ routerClasses() } } diff --git a/contrib/playlib/src/mill/playlib/Server.scala b/contrib/playlib/src/mill/playlib/Server.scala index 66947e0e0eb..b6e07296954 100644 --- a/contrib/playlib/src/mill/playlib/Server.scala +++ b/contrib/playlib/src/mill/playlib/Server.scala @@ -1,26 +1,26 @@ package mill.playlib import mill.scalalib._ -import mill.{Agg, T} +import mill.{Agg, Task} private[playlib] trait Server extends ScalaModule with Version { - def nettyServer = T { component("play-netty-server") } + def nettyServer = Task { component("play-netty-server") } - def akkaHttpServer = T { component("play-akka-http-server") } + def akkaHttpServer = Task { component("play-akka-http-server") } - def pekkoHttpServer = T { component("play-pekko-http-server") } + def pekkoHttpServer = Task { component("play-pekko-http-server") } - def playServerProvider = T { + def playServerProvider = Task { if (playVersion().startsWith("2.")) akkaHttpServer() else pekkoHttpServer() } - override def runIvyDeps = T { + override def runIvyDeps = Task { super.runIvyDeps() ++ Agg(playServerProvider()) } - override def mainClass = T { Some("play.core.server.ProdServerStart") } + override def mainClass = Task { Some("play.core.server.ProdServerStart") } } diff --git a/contrib/playlib/src/mill/playlib/Static.scala b/contrib/playlib/src/mill/playlib/Static.scala index fe6e9ccd259..1ddccd41158 100644 --- a/contrib/playlib/src/mill/playlib/Static.scala +++ b/contrib/playlib/src/mill/playlib/Static.scala @@ -6,31 +6,31 @@ import java.nio.file.attribute.BasicFileAttributes import java.util import mill.scalalib.{Lib, ScalaModule} -import mill.{PathRef, T} +import mill.{PathRef, T, 
Task} trait Static extends ScalaModule { /** * project resources including configuration, webjars and static assets */ - override def resources = T.sources { + override def resources = Task.Sources { super.resources() :+ webJarResources() :+ staticAssets() } /** - * Resource base path of packaged assets (path they will appear in in the jar) + * Resource base path of packaged assets (path they will appear in the jar) */ - def assetsPath = T { "public" } + def assetsPath = Task { "public" } /** * Directories to include assets from */ - def assetSources = T.sources { millSourcePath / assetsPath() } + def assetSources = Task.Sources { millSourcePath / assetsPath() } /* Collected static assets for the project */ - def staticAssets = T { + def staticAssets = Task { val toPath = os.Path(assetsPath(), T.dest) assetSources().foreach { pathRef => val fromPath = pathRef.path @@ -46,14 +46,14 @@ trait Static extends ScalaModule { /** * webjar dependencies - created from transitive ivy deps */ - def webJarDeps = T { + def webJarDeps = Task { transitiveIvyDeps().filter(_.dep.module.organization.value == "org.webjars") } /** * jar files of web jars */ - def webJars = T { + def webJars = Task { Lib.resolveDependencies( repositoriesTask(), webJarDeps() @@ -63,7 +63,7 @@ trait Static extends ScalaModule { /** * webjar resources extracted from their source jars with version from path removed */ - def webJarResources = T { + def webJarResources = Task { extractWebJars(webJars().toSeq, os.Path(assetsPath(), T.dest) / "lib") PathRef(T.dest) } diff --git a/contrib/playlib/src/mill/playlib/Twirl.scala b/contrib/playlib/src/mill/playlib/Twirl.scala index d8fffc0b03a..367d3d6d0df 100644 --- a/contrib/playlib/src/mill/playlib/Twirl.scala +++ b/contrib/playlib/src/mill/playlib/Twirl.scala @@ -1,13 +1,13 @@ package mill.playlib -import mill.T +import mill.Task import mill.twirllib._ trait Twirl extends TwirlModule with Layout { - override def twirlSources = T.sources { app() } + override def 
twirlSources = Task { app() } - override def twirlImports = T { + override def twirlImports = Task { super.twirlImports() ++ Seq( "models._", "controllers._", @@ -19,9 +19,9 @@ trait Twirl extends TwirlModule with Layout { ) } - def twirlOutput = T { Seq(compileTwirl().classes) } + def twirlOutput = Task { Seq(compileTwirl().classes) } - override def generatedSources = T { + override def generatedSources = Task { super.generatedSources() ++ twirlOutput() } } diff --git a/contrib/playlib/src/mill/playlib/Version.scala b/contrib/playlib/src/mill/playlib/Version.scala index d70ae9ae726..a9640fac8ee 100644 --- a/contrib/playlib/src/mill/playlib/Version.scala +++ b/contrib/playlib/src/mill/playlib/Version.scala @@ -1,6 +1,6 @@ package mill.playlib -import mill.T +import mill.{T, Task} import mill.define.Module import mill.scalalib._ @@ -8,15 +8,15 @@ private[playlib] trait Version extends Module { def playVersion: T[String] - private[playlib] def playMinorVersion: T[String] = T { + private[playlib] def playMinorVersion: T[String] = Task { playVersion().split('.').take(2).mkString(".") } - private[playlib] def playOrganization: T[String] = T.task { + private[playlib] def playOrganization: Task[String] = Task.Anon { if (playVersion().startsWith("2.")) "com.typesafe.play" else "org.playframework" } - private[playlib] def component(id: String) = T.task { + private[playlib] def component(id: String): Task[Dep] = Task.Anon { ivy"${playOrganization()}::$id::${playVersion()}" } } diff --git a/contrib/playlib/test/src/mill/playlib/PlayModuleTests.scala b/contrib/playlib/test/src/mill/playlib/PlayModuleTests.scala index 34b533fa073..51629d2fc86 100644 --- a/contrib/playlib/test/src/mill/playlib/PlayModuleTests.scala +++ b/contrib/playlib/test/src/mill/playlib/PlayModuleTests.scala @@ -1,6 +1,7 @@ package mill package playlib +import mill.scalalib.api.ZincWorkerUtil import mill.testkit.{TestBaseModule, UnitTester} import utest.{TestSuite, Tests, assert, _} @@ -13,10 +14,10 @@ 
object PlayModuleTests extends TestSuite with PlayTestSuite { override def playVersion = crossPlayVersion override def scalaVersion = crossScalaVersion object test extends PlayTests - override def ivyDeps = T { super.ivyDeps() ++ Agg(ws()) } + override def ivyDeps = Task { super.ivyDeps() ++ Agg(ws()) } } } - val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "playmulti" + val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "playmulti" def tests: Tests = Tests { test("layout") { @@ -39,7 +40,7 @@ object PlayModuleTests extends TestSuite with PlayTestSuite { app.value.map(_.path.relativeTo(playmulti.millSourcePath).toString()) == Seq( "core/app" ), - sources == app, + sources.value == app.value, resources.value.map(_.path.relativeTo(playmulti.millSourcePath).toString()).contains( "core/conf" ), @@ -102,7 +103,7 @@ object PlayModuleTests extends TestSuite with PlayTestSuite { os.RelPath("controllers/routes$javascript.class"), os.RelPath("controllers/javascript/ReverseHomeController.class"), os.RelPath("controllers/javascript/ReverseAssets.class"), - if (scalaVersion.startsWith("3.")) os.RelPath("router/Routes$$anon$1.class") + if (ZincWorkerUtil.isScala3(scalaVersion)) os.RelPath("router/Routes$$anon$1.class") else os.RelPath("router/Routes$$anonfun$routes$1.class"), os.RelPath("router/Routes.class"), os.RelPath("router/RoutesPrefix$.class"), diff --git a/contrib/playlib/test/src/mill/playlib/PlaySingleApiModuleTests.scala b/contrib/playlib/test/src/mill/playlib/PlaySingleApiModuleTests.scala index 5fdeb266dae..34773f2fcad 100644 --- a/contrib/playlib/test/src/mill/playlib/PlaySingleApiModuleTests.scala +++ b/contrib/playlib/test/src/mill/playlib/PlaySingleApiModuleTests.scala @@ -1,6 +1,6 @@ package mill.playlib -import mill.T +import mill.{T, Task} import mill.testkit.{TestBaseModule, UnitTester} import utest.{TestSuite, Tests, assert, _} @@ -8,12 +8,12 @@ object PlaySingleApiModuleTests extends TestSuite with 
PlayTestSuite { object playsingleapi extends TestBaseModule with PlayApiModule with SingleModule { override val millSourcePath = os.temp() // workaround problem in `SingleModule` - override def playVersion = T { testPlay28 } - override def scalaVersion = T { "2.13.12" } + override def playVersion = Task { testPlay28 } + override def scalaVersion = Task { "2.13.12" } object test extends PlayTests } - val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "playsingleapi" + val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "playsingleapi" def tests: Tests = Tests { test("playVersion") { @@ -38,8 +38,8 @@ object PlaySingleApiModuleTests extends TestSuite with PlayTestSuite { "conf" ), app.value.map(_.path.relativeTo(playsingleapi.millSourcePath).toString()) == Seq("app"), - sources == app, - resources == conf, + sources.value == app.value, + resources.value == conf.value, testSources.value.map( _.path.relativeTo(playsingleapi.millSourcePath).toString() ) == Seq( diff --git a/contrib/playlib/test/src/mill/playlib/PlaySingleModuleTests.scala b/contrib/playlib/test/src/mill/playlib/PlaySingleModuleTests.scala index 4b6d9f815b5..17f00324481 100644 --- a/contrib/playlib/test/src/mill/playlib/PlaySingleModuleTests.scala +++ b/contrib/playlib/test/src/mill/playlib/PlaySingleModuleTests.scala @@ -1,6 +1,6 @@ package mill.playlib -import mill.T +import mill.{T, Task} import mill.testkit.{TestBaseModule, UnitTester} import utest.{TestSuite, Tests, assert, _} @@ -8,12 +8,12 @@ object PlaySingleModuleTests extends TestSuite with PlayTestSuite { object playsingle extends TestBaseModule with PlayModule with SingleModule { override val millSourcePath = os.temp() // workaround problem in `SingleModule` - override def playVersion = T { testPlay28 } - override def scalaVersion = T { sys.props.getOrElse("TEST_SCALA_2_13_VERSION", ???) 
} + override def playVersion = Task { testPlay28 } + override def scalaVersion = Task { sys.props.getOrElse("TEST_SCALA_2_13_VERSION", ???) } object test extends PlayTests } - val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "playsingle" + val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "playsingle" def tests: Tests = Tests { test("layout") { @@ -27,7 +27,7 @@ object PlaySingleModuleTests extends TestSuite with PlayTestSuite { assert( conf.value.map(_.path.relativeTo(playsingle.millSourcePath).toString()) == Seq("conf"), app.value.map(_.path.relativeTo(playsingle.millSourcePath).toString()) == Seq("app"), - sources == app, + sources.value == app.value, resources.value.map(_.path.relativeTo(playsingle.millSourcePath).toString()).contains( "conf" ), diff --git a/contrib/playlib/test/src/mill/playlib/RouterModuleTests.scala b/contrib/playlib/test/src/mill/playlib/RouterModuleTests.scala index 9431fd47312..718ddf27670 100644 --- a/contrib/playlib/test/src/mill/playlib/RouterModuleTests.scala +++ b/contrib/playlib/test/src/mill/playlib/RouterModuleTests.scala @@ -22,7 +22,7 @@ object RouterModuleTests extends TestSuite with PlayTestSuite { } - val resourceFolder = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) + val resourceFolder = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) val resourcePath: os.Path = resourceFolder / "hello-world" val invalidResourcePath: os.Path = resourceFolder / "invalid" val invalidSubResourcePath: os.Path = resourceFolder / "invalidsub" diff --git a/contrib/proguard/readme.adoc b/contrib/proguard/readme.adoc index 1f9d0ecd27a..90a945dc499 100644 --- a/contrib/proguard/readme.adoc +++ b/contrib/proguard/readme.adoc @@ -26,15 +26,15 @@ import contrib.proguard._ object foo extends ScalaModule with Proguard { def scalaVersion = "2.13.8" - override def shrink: T[Boolean] = T { true } - override def optimize: T[Boolean] = T { false } - override def obfuscate: T[Boolean] = T { false } + override def 
shrink: T[Boolean] = Task { true } + override def optimize: T[Boolean] = Task { false } + override def obfuscate: T[Boolean] = Task { false } // https://github.com/Guardsquare/proguard/releases override def proguardVersion = T("7.3.2") // tell Proguard where to enter your app, so it can optimise outwards from there - override def entryPoint = T { + override def entryPoint = Task { s"""|-keep public class myProject.main.myApp { | public ReturnType main(ParamTypeA, ParamTypeB); |} diff --git a/contrib/proguard/src/mill/contrib/proguard/Proguard.scala b/contrib/proguard/src/mill/contrib/proguard/Proguard.scala index 7076abb70a5..597a56b6f95 100644 --- a/contrib/proguard/src/mill/contrib/proguard/Proguard.scala +++ b/contrib/proguard/src/mill/contrib/proguard/Proguard.scala @@ -1,7 +1,7 @@ package mill.contrib.proguard import mill.java9rtexport.Export -import mill.T +import mill.{T, Task} import mill.Agg import mill.api.{Loose, PathRef} import mill.util.Jvm @@ -11,10 +11,10 @@ import os.{Path, Shellable} /** * Adds proguard capabilities when mixed-in to a module * - * The target name is `proguard`. This runs proguard on the output jar of `asssembly` + * The target name is `proguard`. This runs proguard on the output jar of `assembly` * and outputs a shrunk/obfuscated/optimized jar under `out.jar` in the `dest/` folder. * - * Sensible defaults are provided, so no members require overriding.. + * Sensible defaults are provided, so no members require overriding. */ trait Proguard extends ScalaModule { @@ -22,7 +22,7 @@ trait Proguard extends ScalaModule { * The version of proguard to download from Maven. * https://mvnrepository.com/artifact/com.guardsquare/proguard-base */ - def proguardVersion: T[String] = T { + def proguardVersion: T[String] = Task { T.log.error( "Using default proguard version is deprecated. Please override target proguardVersion to specify the version." 
) @@ -30,63 +30,53 @@ trait Proguard extends ScalaModule { } /** Run the "shrink" step in the proguard pipeline. Defaults to true. */ - def shrink: T[Boolean] = T { true } + def shrink: T[Boolean] = Task { true } /** Run the "optimize" step in the proguard pipeline. Defaults to true. */ - def optimize: T[Boolean] = T { true } + def optimize: T[Boolean] = Task { true } /** Run the "obfuscate" step in the proguard pipeline. Defaults to true. */ - def obfuscate: T[Boolean] = T { true } + def obfuscate: T[Boolean] = Task { true } /** * Run the "optimize" step in the proguard pipeline. Defaults to true. * * Note that this is required for Java 7 and above. */ - def preverify: T[Boolean] = T { true } + def preverify: T[Boolean] = Task { true } /** * The path to JAVA_HOME. * * This is used for both the `java` command binary, - * as well as the standard library jars. + * and the standard library jars. * Defaults to the `java.home` system property. * Keep in sync with [[java9RtJar]]- */ - def javaHome: T[PathRef] = T.input { + def javaHome: T[PathRef] = Task.Input { PathRef(Path(sys.props("java.home"))) } /** Specifies the input jar to proguard. Defaults to the output of the `assembly` task. */ - def inJar: T[PathRef] = T { assembly() } + def inJar: T[PathRef] = Task { assembly() } /** * This needs to return the Java RT JAR if on Java 9 or above. * Keep in sync with [[javaHome]]. */ - def java9RtJar: T[Seq[PathRef]] = T { - if (mill.main.client.Util.isJava9OrAbove) { - val rt = T.dest / Export.rtJarName - if (!os.exists(rt)) { - T.log.outputStream.println( - s"Preparing Java runtime JAR; this may take a minute or two ..." - ) - Export.rtTo(rt.toIO, false) - } - Seq(PathRef(rt)) - } else { - Seq() - } + def java9RtJar: T[Seq[PathRef]] = Task { + if (mill.main.client.Util.isJava9OrAbove) Seq(PathRef(T.home / Export.rtJarName)) + else Seq() } /** * The library jars proguard requires * Defaults the jars under `javaHome`. 
*/ - def libraryJars: T[Seq[PathRef]] = T { + def libraryJars: T[Seq[PathRef]] = Task { val javaJars = os.list(javaHome().path / "lib", sort = false).filter(_.ext == "jar").toSeq.map(PathRef(_)) - javaJars ++ java9RtJar() + javaJars } /** @@ -96,7 +86,7 @@ trait Proguard extends ScalaModule { * The stdout and stderr of the command are written to the `dest/` folder. * The output jar is written to `dest/our.jar`. */ - def proguard: T[PathRef] = T { + def proguard: T[PathRef] = Task { val outJar = T.dest / "out.jar" val args = Seq[Shellable]( @@ -106,7 +96,10 @@ trait Proguard extends ScalaModule { "-outjars", outJar, "-libraryjars", - libraryJars().map(_.path).mkString(java.io.File.pathSeparator), + ( + libraryJars().map(_.path) ++ + Seq("/jmods/java.base.jmod(!**.jar;!module-info.class)") + ).mkString(java.io.File.pathSeparator), entryPoint(), additionalOptions() ).flatMap(_.value) @@ -134,13 +127,13 @@ trait Proguard extends ScalaModule { * The location of the proguard jar files. * These are downloaded from JCenter and fed to `java -cp` */ - def proguardClasspath: T[Loose.Agg[PathRef]] = T { + def proguardClasspath: T[Loose.Agg[PathRef]] = Task { defaultResolver().resolveDeps( Agg(ivy"com.guardsquare:proguard-base:${proguardVersion()}") ) } - private def steps: T[Seq[String]] = T { + private def steps: T[Seq[String]] = Task { (if (optimize()) Seq() else Seq("-dontoptimize")) ++ (if (obfuscate()) Seq() else Seq("-dontobfuscate")) ++ (if (shrink()) Seq() else Seq("-dontshrink")) ++ @@ -154,7 +147,7 @@ trait Proguard extends ScalaModule { * Can be overridden to specify a different entrypoint, * or additional entrypoints can be specified with `additionalOptions`. */ - def entryPoint: T[String] = T { + def entryPoint: T[String] = Task { s"""|-keep public class ${finalMainClass()} { | public static void main(java.lang.String[]); |} @@ -166,7 +159,7 @@ trait Proguard extends ScalaModule { * * These are fed as-is to the proguard command. 
*/ - def additionalOptions: T[Seq[String]] = T { + def additionalOptions: T[Seq[String]] = Task { T.log.error( "Proguard is set to not warn about message: can't find referenced method 'void invoke()' in library class java.lang.invoke.MethodHandle" ) diff --git a/contrib/proguard/test/src/mill/contrib/proguard/ProguardTests.scala b/contrib/proguard/test/src/mill/contrib/proguard/ProguardTests.scala index 199b9e76bea..d563315bc1d 100644 --- a/contrib/proguard/test/src/mill/contrib/proguard/ProguardTests.scala +++ b/contrib/proguard/test/src/mill/contrib/proguard/ProguardTests.scala @@ -15,16 +15,16 @@ object ProguardTests extends TestSuite { object proguard extends TestBaseModule with ScalaModule with Proguard { override def scalaVersion: T[String] = T(sys.props.getOrElse("MILL_SCALA_2_13_VERSION", ???)) - def proguardContribClasspath = T { + def proguardContribClasspath = Task { millProjectModule("mill-contrib-proguard", repositoriesTask()) } - override def runClasspath: Target[Seq[PathRef]] = - T { super.runClasspath() ++ proguardContribClasspath() } + override def runClasspath: T[Seq[PathRef]] = + Task { super.runClasspath() ++ proguardContribClasspath() } } - val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "proguard" + val testModuleSourcesPath: Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "proguard" def tests: Tests = Tests { test("Proguard module") { diff --git a/contrib/scalapblib/readme.adoc b/contrib/scalapblib/readme.adoc index 6f1b0696e2b..bddf2e3aa5f 100644 --- a/contrib/scalapblib/readme.adoc +++ b/contrib/scalapblib/readme.adoc @@ -78,6 +78,6 @@ object example extends ScalaPBModule { def scalaVersion = "2.12.6" def scalaPBVersion = "0.7.4" override def scalaPBAdditionalArgs = - Seq(s"--zio_out=${T.dest.toIO.getCanonicalPath}") + Seq(s"--zio_out=${Task.dest.toIO.getCanonicalPath}") } ---- diff --git a/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBModule.scala 
b/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBModule.scala index dc59659170d..e28fa336f02 100644 --- a/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBModule.scala +++ b/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBModule.scala @@ -12,9 +12,9 @@ import scala.util.Using /** @see [[http://www.lihaoyi.com/mill/page/contrib-modules.html#scalapb ScalaPB Module]] */ trait ScalaPBModule extends ScalaModule { - override def generatedSources = T { super.generatedSources() :+ compileScalaPB() } + override def generatedSources = Task { super.generatedSources() :+ compileScalaPB() } - override def ivyDeps = T { + override def ivyDeps = Task { super.ivyDeps() ++ Agg(ivy"com.thesamet.scalapb::scalapb-runtime::${scalaPBVersion()}") ++ (if (!scalaPBGrpc()) Agg() @@ -23,16 +23,16 @@ trait ScalaPBModule extends ScalaModule { def scalaPBVersion: T[String] - def scalaPBFlatPackage: T[Boolean] = T { false } + def scalaPBFlatPackage: T[Boolean] = Task { false } - def scalaPBJavaConversions: T[Boolean] = T { false } + def scalaPBJavaConversions: T[Boolean] = Task { false } - def scalaPBGrpc: T[Boolean] = T { true } + def scalaPBGrpc: T[Boolean] = Task { true } - def scalaPBSingleLineToProtoString: T[Boolean] = T { false } + def scalaPBSingleLineToProtoString: T[Boolean] = Task { false } /** ScalaPB enables lenses by default, this option allows you to disable it. */ - def scalaPBLenses: T[Boolean] = T { true } + def scalaPBLenses: T[Boolean] = Task { true } def scalaPBSearchDeps: Boolean = false @@ -47,15 +47,15 @@ trait ScalaPBModule extends ScalaModule { * @return a sequence of Strings representing the additional arguments to append * (defaults to empty Seq[String]). 
*/ - def scalaPBAdditionalArgs: T[Seq[String]] = T { Seq.empty[String] } + def scalaPBAdditionalArgs: T[Seq[String]] = Task { Seq.empty[String] } - def scalaPBProtocPath: T[Option[String]] = T { None } + def scalaPBProtocPath: T[Option[String]] = Task { None } - def scalaPBSources: T[Seq[PathRef]] = T.sources { + def scalaPBSources: T[Seq[PathRef]] = Task.Sources { millSourcePath / "protobuf" } - def scalaPBOptions: T[String] = T { + def scalaPBOptions: T[String] = Task { ( (if (scalaPBFlatPackage()) Seq("flat_package") else Seq.empty) ++ (if (scalaPBJavaConversions()) Seq("java_conversions") else Seq.empty) ++ @@ -72,7 +72,7 @@ trait ScalaPBModule extends ScalaModule { ).mkString(",") } - def scalaPBClasspath: T[Loose.Agg[PathRef]] = T { + def scalaPBClasspath: T[Loose.Agg[PathRef]] = Task { resolveDependencies( repositoriesTask(), Seq(ivy"com.thesamet.scalapb::scalapbc:${scalaPBVersion()}") @@ -80,16 +80,16 @@ trait ScalaPBModule extends ScalaModule { ) } - def scalaPBIncludePath: T[Seq[PathRef]] = T.sources { Seq.empty[PathRef] } + def scalaPBIncludePath: T[Seq[PathRef]] = Task.Sources { Seq.empty[PathRef] } - private def scalaDepsPBIncludePath = if (scalaPBSearchDeps) T { Seq(scalaPBUnpackProto()) } - else T { Seq.empty[PathRef] } + private def scalaDepsPBIncludePath = if (scalaPBSearchDeps) Task { Seq(scalaPBUnpackProto()) } + else Task { Seq.empty[PathRef] } - def scalaPBProtoClasspath: T[Agg[PathRef]] = T { + def scalaPBProtoClasspath: T[Agg[PathRef]] = Task { defaultResolver().resolveDeps(transitiveCompileIvyDeps() ++ transitiveIvyDeps()) } - def scalaPBUnpackProto: T[PathRef] = T { + def scalaPBUnpackProto: T[PathRef] = Task { val cp = scalaPBProtoClasspath() val dest = T.dest cp.iterator.foreach { ref => @@ -118,7 +118,7 @@ trait ScalaPBModule extends ScalaModule { /* * options passing to ScalaPBC **except** `--scala_out=...`, `--proto_path=source_parent` and `source` */ - def scalaPBCompileOptions: T[Seq[String]] = T { + def scalaPBCompileOptions: 
T[Seq[String]] = Task { ScalaPBWorkerApi.scalaPBWorker().compileOptions( scalaPBProtocPath(), (scalaPBIncludePath() ++ scalaDepsPBIncludePath()).map(_.path), @@ -126,7 +126,7 @@ trait ScalaPBModule extends ScalaModule { ) } - def compileScalaPB: T[PathRef] = T.persistent { + def compileScalaPB: T[PathRef] = Task(persistent = true) { ScalaPBWorkerApi.scalaPBWorker() .compile( scalaPBClasspath(), diff --git a/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBWorker.scala b/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBWorker.scala index 4472ca77a00..033b1a42d45 100644 --- a/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBWorker.scala +++ b/contrib/scalapblib/src/mill/contrib/scalapblib/ScalaPBWorker.scala @@ -4,7 +4,6 @@ package contrib.scalapblib import java.io.File import mill.api.PathRef -import mill.T import mill.define.{Discover, ExternalModule, Worker} class ScalaPBWorker extends AutoCloseable { @@ -126,6 +125,6 @@ trait ScalaPBWorkerApi { } object ScalaPBWorkerApi extends ExternalModule { - def scalaPBWorker: Worker[ScalaPBWorker] = T.worker { new ScalaPBWorker() } + def scalaPBWorker: Worker[ScalaPBWorker] = Task.Worker { new ScalaPBWorker() } lazy val millDiscover: Discover = Discover[this.type] } diff --git a/contrib/scalapblib/test/src/mill/contrib/scalapblib/TutorialTests.scala b/contrib/scalapblib/test/src/mill/contrib/scalapblib/TutorialTests.scala index 9e6af63220f..7e513af3904 100644 --- a/contrib/scalapblib/test/src/mill/contrib/scalapblib/TutorialTests.scala +++ b/contrib/scalapblib/test/src/mill/contrib/scalapblib/TutorialTests.scala @@ -34,7 +34,7 @@ object TutorialTests extends TestSuite { object TutorialWithAdditionalArgs extends TutorialBase { object core extends TutorialModule { - override def scalaPBAdditionalArgs = T { + override def scalaPBAdditionalArgs = Task { Seq( "--additional-test=..." 
) @@ -44,7 +44,7 @@ object TutorialTests extends TestSuite { object TutorialWithSpecificSources extends TutorialBase { object core extends TutorialModule { - override def scalaPBSources: T[Seq[PathRef]] = T.sources { + override def scalaPBSources: T[Seq[PathRef]] = Task.Sources { millSourcePath / "protobuf/tutorial/Tutorial.proto" } @@ -55,7 +55,7 @@ object TutorialTests extends TestSuite { } } - val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) + val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) def protobufOutPath(eval: UnitTester): os.Path = eval.outPath / "core/compileScalaPB.dest/com/example/tutorial" @@ -83,57 +83,61 @@ object TutorialTests extends TestSuite { test("compileScalaPB") { test("calledDirectly") - UnitTester(Tutorial, resourcePath).scoped { eval => - val Right(result) = eval.apply(Tutorial.core.compileScalaPB) + if (!mill.main.client.Util.isWindows) { + val Right(result) = eval.apply(Tutorial.core.compileScalaPB) - val outPath = protobufOutPath(eval) + val outPath = protobufOutPath(eval) - val outputFiles = os.walk(result.value.path).filter(os.isFile) + val outputFiles = os.walk(result.value.path).filter(os.isFile) - val expectedSourcefiles = compiledSourcefiles.map(outPath / _) + val expectedSourcefiles = compiledSourcefiles.map(outPath / _) - assert( - result.value.path == eval.outPath / "core/compileScalaPB.dest", - outputFiles.nonEmpty, - outputFiles.forall(expectedSourcefiles.contains), - outputFiles.size == 5, - result.evalCount > 0 - ) + assert( + result.value.path == eval.outPath / "core/compileScalaPB.dest", + outputFiles.nonEmpty, + outputFiles.forall(expectedSourcefiles.contains), + outputFiles.size == 5, + result.evalCount > 0 + ) - // don't recompile if nothing changed - val Right(result2) = eval.apply(Tutorial.core.compileScalaPB) + // don't recompile if nothing changed + val Right(result2) = eval.apply(Tutorial.core.compileScalaPB) - assert(result2.evalCount == 0) + 
assert(result2.evalCount == 0) + } } test("calledWithSpecificFile") - UnitTester( TutorialWithSpecificSources, resourcePath ).scoped { eval => - val Right(result) = eval.apply(TutorialWithSpecificSources.core.compileScalaPB) + if (!mill.main.client.Util.isWindows) { + val Right(result) = eval.apply(TutorialWithSpecificSources.core.compileScalaPB) - val outPath = protobufOutPath(eval) + val outPath = protobufOutPath(eval) - val outputFiles = os.walk(result.value.path).filter(os.isFile) + val outputFiles = os.walk(result.value.path).filter(os.isFile) - val expectedSourcefiles = Seq[os.RelPath]( - os.rel / "AddressBook.scala", - os.rel / "Person.scala", - os.rel / "TutorialProto.scala", - os.rel / "IncludeProto.scala" - ).map(outPath / _) + val expectedSourcefiles = Seq[os.RelPath]( + os.rel / "AddressBook.scala", + os.rel / "Person.scala", + os.rel / "TutorialProto.scala", + os.rel / "IncludeProto.scala" + ).map(outPath / _) - assert( - result.value.path == eval.outPath / "core/compileScalaPB.dest", - outputFiles.nonEmpty, - outputFiles.forall(expectedSourcefiles.contains), - outputFiles.size == 3, - result.evalCount > 0 - ) + assert( + result.value.path == eval.outPath / "core/compileScalaPB.dest", + outputFiles.nonEmpty, + outputFiles.forall(expectedSourcefiles.contains), + outputFiles.size == 3, + result.evalCount > 0 + ) - // don't recompile if nothing changed - val Right(result2) = eval.apply(Tutorial.core.compileScalaPB) + // don't recompile if nothing changed + val Right(result2) = eval.apply(Tutorial.core.compileScalaPB) - assert(result2.evalCount == 0) + assert(result2.evalCount == 0) + } } // // This throws a NullPointerException in coursier somewhere @@ -163,7 +167,7 @@ object TutorialTests extends TestSuite { } test("useExternalProtocCompiler") { - /* This ensure that the `scalaPBProtocPath` is properly used. + /* This ensures that the `scalaPBProtocPath` is properly used. * As the given path is incorrect, the compilation should fail. 
*/ test("calledWithWrongProtocFile") - UnitTester(TutorialWithProtoc, resourcePath).scoped { diff --git a/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi.scala b/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi.scala deleted file mode 100644 index 55fc01d3dc7..00000000000 --- a/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi.scala +++ /dev/null @@ -1,46 +0,0 @@ -package mill.contrib.scoverage.api - -import mill.api.Ctx - -trait ScoverageReportWorkerApi { - import ScoverageReportWorkerApi._ - - @deprecated("Use other overload instead.", "Mill after 0.10.7") - def report( - reportType: ReportType, - sources: Seq[os.Path], - dataDirs: Seq[os.Path] - )(implicit - ctx: Ctx - ): Unit = { - report(reportType, sources, dataDirs, ctx.workspace) - } - - def report( - reportType: ReportType, - sources: Seq[os.Path], - dataDirs: Seq[os.Path], - sourceRoot: os.Path - )(implicit - ctx: Ctx - ): Unit = { - // FIXME: We only call the deprecated version here, to preserve binary compatibility. Remove when appropriate. - ctx.log.error( - "Binary compatibility stub may cause infinite loops with StackOverflowError. 
You need to implement: def report(ReportType, Seq[Path], Seq[Path], os.Path): Unit" - ) - report(reportType, sources, dataDirs) - } -} - -object ScoverageReportWorkerApi { - sealed trait ReportType - sealed trait FileReportType extends ReportType { def folderName: String } - object ReportType { - final case object Html extends FileReportType { val folderName: String = "htmlReport" } - final case object Xml extends FileReportType { val folderName: String = "xmlReport" } - final case object XmlCobertura extends FileReportType { - val folderName: String = "xmlCoberturaReport" - } - final case object Console extends ReportType - } -} diff --git a/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi2.java b/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi2.java new file mode 100644 index 00000000000..14dc82d6dd1 --- /dev/null +++ b/contrib/scoverage/api/src/mill/contrib/scoverage/api/ScoverageReportWorkerApi2.java @@ -0,0 +1,99 @@ +package mill.contrib.scoverage.api; + +import java.io.IOException; +import java.io.Serializable; +import java.nio.file.Files; +import java.nio.file.Path; + +public interface ScoverageReportWorkerApi2 { + + interface Logger { + void info(String msg); + + void error(String msg); + + void debug(String msg); + } + + interface Ctx { + Logger log(); + + Path dest(); + } + + public abstract static class ReportType implements Serializable { + private String name; + + /*private[api]*/ + ReportType(String name) {} + + public static final ReportType Console = new ConsoleModule(); + public static final FileReportType Html = new HtmlModule(); + public static final FileReportType Xml = new XmlModule(); + public static final FileReportType XmlCobertura = new XmlCoberturaModule(); + + /* private[api]*/ + static final class ConsoleModule extends ReportType implements Serializable { + /* private[api]*/ + ConsoleModule() { + super("Console"); + } + } + ; + + /* private[api]*/ + static final class HtmlModule 
extends FileReportType implements Serializable { + /* private[api]*/ + HtmlModule() { + super("Html", "htmlReport"); + } + } + ; + + /* private[api]*/ + static final class XmlModule extends FileReportType implements Serializable { + /* private[api]*/ + XmlModule() { + super("Xml", "xmlReport"); + } + } + + /* private[api]*/ + static final class XmlCoberturaModule extends FileReportType implements Serializable { + /* private[api]*/ + XmlCoberturaModule() { + super("XmlCobertura", "xmlCoberturaReport"); + } + } + + @Override + public String toString() { + return name; + } + } + + public abstract static class FileReportType extends ReportType implements Serializable { + private final String folderName; + + /*private[api]*/ + FileReportType(String name, String folderName) { + super(name); + this.folderName = folderName; + } + + public String folderName() { + return folderName; + } + } + + void report(ReportType reportType, Path[] sources, Path[] dataDirs, Path sourceRoot, Ctx ctx); + + static void makeAllDirs(Path path) throws IOException { + // Replicate behavior of `os.makeDir.all(path)` + if (Files.isDirectory(path) && Files.isSymbolicLink(path)) { + // do nothing + } else { + Files.createDirectories(path); + } + } +} diff --git a/contrib/scoverage/readme.adoc b/contrib/scoverage/readme.adoc index 1a4924a7434..21c302c039f 100644 --- a/contrib/scoverage/readme.adoc +++ b/contrib/scoverage/readme.adoc @@ -19,11 +19,11 @@ import $ivy.`com.lihaoyi::mill-contrib-scoverage:` import mill.contrib.scoverage.ScoverageModule object foo extends ScoverageModule { - def scalaVersion = "2.12.9" - def scoverageVersion = "1.4.0" + def scalaVersion = "2.13.15" + def scoverageVersion = "2.1.1" object test extends ScoverageTests with TestModule.ScalaTest { - def ivyDeps = Agg(ivy"org.scalatest::scalatest:3.0.8") + def ivyDeps = Agg(ivy"org.scalatest::scalatest:3.2.19") } } ---- diff --git a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageModule.scala 
b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageModule.scala index 5dd43658e05..121899d8bb3 100644 --- a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageModule.scala +++ b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageModule.scala @@ -3,14 +3,12 @@ package mill.contrib.scoverage import coursier.Repository import mill._ import mill.api.{Loose, PathRef, Result} -import mill.contrib.scoverage.api.ScoverageReportWorkerApi.ReportType +import mill.contrib.scoverage.api.ScoverageReportWorkerApi2.ReportType import mill.main.BuildInfo import mill.scalalib.api.ZincWorkerUtil import mill.scalalib.{Dep, DepSyntax, JavaModule, ScalaModule} import mill.util.Util.millProjectModule -import scala.util.Try - /** * Adds targets to a [[mill.scalalib.ScalaModule]] to create test coverage reports. * @@ -19,7 +17,7 @@ import scala.util.Try * [[https://github.com/scoverage/scalac-scoverage-plugin scoverage compiler plugin]]. * * To declare a module for which you want to generate coverage reports you can - * Extends the `mill.contrib.scoverage.ScoverageModule` trait when defining your + * extend the `mill.contrib.scoverage.ScoverageModule` trait when defining your * Module. Additionally, you must define a submodule that extends the * `ScoverageTests` trait that belongs to your instance of `ScoverageModule`. 
* @@ -29,11 +27,11 @@ import scala.util.Try * import mill.contrib.scoverage.ScoverageModule * * Object foo extends ScoverageModule { - * def scalaVersion = "2.12.9" - * def scoverageVersion = "1.4.0" + * def scalaVersion = "2.13.15" + * def scoverageVersion = "2.1.1" * * object test extends ScoverageTests { - * def ivyDeps = Agg(ivy"org.scalatest::scalatest:3.0.5") + * def ivyDeps = Agg(ivy"org.scalatest::scalatest:3.2.19") * def testFrameworks = Seq("org.scalatest.tools.Framework") * } * } @@ -60,11 +58,9 @@ trait ScoverageModule extends ScalaModule { outer: ScalaModule => */ def scoverageVersion: T[String] - private def isScoverage2: Task[Boolean] = T.task { scoverageVersion().startsWith("2.") } - - private def isScala3: Task[Boolean] = T.task { ZincWorkerUtil.isScala3(outer.scalaVersion()) } + private def isScala3: Task[Boolean] = Task.Anon { ZincWorkerUtil.isScala3(outer.scalaVersion()) } - def scoverageRuntimeDeps: T[Agg[Dep]] = T { + def scoverageRuntimeDeps: T[Agg[Dep]] = Task { if (isScala3()) { Agg.empty } else { @@ -72,89 +68,60 @@ trait ScoverageModule extends ScalaModule { outer: ScalaModule => } } - def scoveragePluginDeps: T[Agg[Dep]] = T { + def scoveragePluginDeps: T[Agg[Dep]] = Task { val sv = scoverageVersion() if (isScala3()) { Agg.empty } else { - if (isScoverage2()) { - Agg( - ivy"org.scoverage:::scalac-scoverage-plugin:${sv}", - ivy"org.scoverage::scalac-scoverage-domain:${sv}", - ivy"org.scoverage::scalac-scoverage-serializer:${sv}", - ivy"org.scoverage::scalac-scoverage-reporter:${sv}" - ) - } else { - Agg(ivy"org.scoverage:::scalac-scoverage-plugin:${sv}") - } + Agg( + ivy"org.scoverage:::scalac-scoverage-plugin:${sv}", + ivy"org.scoverage::scalac-scoverage-domain:${sv}", + ivy"org.scoverage::scalac-scoverage-serializer:${sv}", + ivy"org.scoverage::scalac-scoverage-reporter:${sv}" + ) } } - private def checkVersions = T.task { + private def checkVersions = Task.Anon { val sv = scalaVersion() val isSov2 = scoverageVersion().startsWith("2.") 
(sv.split('.'), isSov2) match { + case (_, false) => + Result.Failure("Scoverage 1.x is no longer supported. Please use Scoverage 2.x") case (Array("3", "0" | "1", _*), _) => Result.Failure( - "Scala 3.0 and 3.1 is not supported by Scoverage. You have to update to at least Scala 3.2 and Scoverage 2.0" - ) - case (Array("3", _*), false) => Result.Failure( - "Scoverage 1.x does not support Scala 3. You have to update to at least Scala 3.2 and Scoverage 2.0" - ) - case (Array("2", "11", _*), true) => Result.Failure( - "Scoverage 2.x is not compatible with Scala 2.11. Consider using Scoverage 1.x or switch to a newer Scala version." + "Scala 3.0 and 3.1 is not supported by Scoverage. You have to update to at least Scala 3.2" ) case _ => } } - private def scoverageReporterIvyDeps: T[Agg[Dep]] = T { + private def scoverageReporterIvyDeps: T[Agg[Dep]] = Task { checkVersions() val sv = scoverageVersion() val millScalaVersion = BuildInfo.scalaVersion - if (sv.startsWith("1.")) { - // In Scoverage 1.x, the reporting API is included in the plugin jar - val scalaVersion = millScalaVersion.split("[.]", 4).toList.take(3) match { - // Scoverage 1 is not released for Scala > 2.13.8, but we don't need to compiler specific code, - // only the reporter API, which does not depend on the Compiler API, so using another full Scala version - // should be safe - case "2" :: "13" :: c :: _ if Try(c.toInt).getOrElse(0) > 8 => - val v = "2.13.8" - T.log.outputStream.println( - s"Detected an unsupported Scala version (${millScalaVersion}). Using Scala version ${v} to resolve scoverage ${sv} reporting API." 
- ) - v - case _ => millScalaVersion - } - Agg(ivy"org.scoverage:scalac-scoverage-plugin_${scalaVersion}:${sv}") - } else { - // we need to resolve with same Scala version used for Mill, not the project Scala version - val scalaBinVersion = ZincWorkerUtil.scalaBinaryVersion(millScalaVersion) - // In Scoverage 2.x, the reporting API is no longer bundled in the plugin jar - Agg( - ivy"org.scoverage:scalac-scoverage-domain_${scalaBinVersion}:${sv}", - ivy"org.scoverage:scalac-scoverage-serializer_${scalaBinVersion}:${sv}", - ivy"org.scoverage:scalac-scoverage-reporter_${scalaBinVersion}:${sv}" - ) - } + // we need to resolve with same Scala version used for Mill, not the project Scala version + val scalaBinVersion = ZincWorkerUtil.scalaBinaryVersion(millScalaVersion) + // In Scoverage 2.x, the reporting API is no longer bundled in the plugin jar + Agg( + ivy"org.scoverage:scalac-scoverage-domain_${scalaBinVersion}:${sv}", + ivy"org.scoverage:scalac-scoverage-serializer_${scalaBinVersion}:${sv}", + ivy"org.scoverage:scalac-scoverage-reporter_${scalaBinVersion}:${sv}" + ) } - def scoverageToolsClasspath: T[Agg[PathRef]] = T { + def scoverageToolsClasspath: T[Agg[PathRef]] = Task { scoverageReportWorkerClasspath() ++ defaultResolver().resolveDeps(scoverageReporterIvyDeps()) } - def scoverageClasspath: T[Agg[PathRef]] = T { + def scoverageClasspath: T[Agg[PathRef]] = Task { defaultResolver().resolveDeps(scoveragePluginDeps()) } - def scoverageReportWorkerClasspath: T[Agg[PathRef]] = T { - val isScov2 = isScoverage2() - - val workerArtifact = - if (isScov2) "mill-contrib-scoverage-worker2" - else "mill-contrib-scoverage-worker" + def scoverageReportWorkerClasspath: T[Agg[PathRef]] = Task { + val workerArtifact = "mill-contrib-scoverage-worker2" millProjectModule( workerArtifact, @@ -168,7 +135,7 @@ trait ScoverageModule extends ScalaModule { outer: ScalaModule => trait ScoverageData extends ScalaModule { - def doReport(reportType: ReportType): Task[Unit] = T.task { + def 
doReport(reportType: ReportType): Task[Unit] = Task.Anon { ScoverageReportWorker .scoverageReportWorker() .bridge(scoverageToolsClasspath()) @@ -179,48 +146,52 @@ trait ScoverageModule extends ScalaModule { outer: ScalaModule => * The persistent data dir used to store scoverage coverage data. * Use to store coverage data at compile-time and by the various report targets. */ - def data: T[PathRef] = T.persistent { + def data: T[PathRef] = Task(persistent = true) { // via the persistent target, we ensure, the dest dir doesn't get cleared PathRef(T.dest) } override def compileResources: T[Seq[PathRef]] = outer.compileResources - override def generatedSources: Target[Seq[PathRef]] = T { outer.generatedSources() } - override def allSources: Target[Seq[PathRef]] = T { outer.allSources() } + override def generatedSources: T[Seq[PathRef]] = Task { outer.generatedSources() } + override def allSources: T[Seq[PathRef]] = Task { outer.allSources() } override def moduleDeps: Seq[JavaModule] = outer.moduleDeps override def compileModuleDeps: Seq[JavaModule] = outer.compileModuleDeps - override def sources: T[Seq[PathRef]] = T.sources { outer.sources() } - override def resources: T[Seq[PathRef]] = T.sources { outer.resources() } - override def scalaVersion = T { outer.scalaVersion() } - override def repositoriesTask: Task[Seq[Repository]] = T.task { outer.repositoriesTask() } - override def compileIvyDeps: Target[Agg[Dep]] = T { outer.compileIvyDeps() } - override def ivyDeps: Target[Agg[Dep]] = - T { outer.ivyDeps() ++ outer.scoverageRuntimeDeps() } - override def unmanagedClasspath: Target[Agg[PathRef]] = T { outer.unmanagedClasspath() } + override def sources: T[Seq[PathRef]] = Task { outer.sources() } + override def resources: T[Seq[PathRef]] = Task { outer.resources() } + override def scalaVersion = Task { outer.scalaVersion() } + override def repositoriesTask: Task[Seq[Repository]] = Task.Anon { outer.repositoriesTask() } + override def compileIvyDeps: T[Agg[Dep]] = Task { 
outer.compileIvyDeps() } + override def ivyDeps: T[Agg[Dep]] = + Task { outer.ivyDeps() ++ outer.scoverageRuntimeDeps() } + override def unmanagedClasspath: T[Agg[PathRef]] = Task { outer.unmanagedClasspath() } /** Add the scoverage scalac plugin. */ - override def scalacPluginIvyDeps: Target[Loose.Agg[Dep]] = - T { outer.scalacPluginIvyDeps() ++ outer.scoveragePluginDeps() } + override def scalacPluginIvyDeps: T[Loose.Agg[Dep]] = + Task { outer.scalacPluginIvyDeps() ++ outer.scoveragePluginDeps() } /** Add the scoverage specific plugin settings (`dataDir`). */ - override def scalacOptions: Target[Seq[String]] = - T { + override def scalacOptions: T[Seq[String]] = + Task { val extras = if (isScala3()) { - Seq(s"-coverage-out:${data().path.toIO.getPath()}") + Seq( + s"-coverage-out:${data().path.toIO.getPath()}", + s"-sourceroot:${T.workspace}" + ) } else { - val base = s"-P:scoverage:dataDir:${data().path.toIO.getPath()}" - if (isScoverage2()) Seq(base, s"-P:scoverage:sourceRoot:${T.workspace}") - else Seq(base) + Seq( + s"-P:scoverage:dataDir:${data().path.toIO.getPath()}", + s"-P:scoverage:sourceRoot:${T.workspace}" + ) } outer.scalacOptions() ++ extras } - def htmlReport(): Command[Unit] = T.command { doReport(ReportType.Html)() } - def xmlReport(): Command[Unit] = T.command { doReport(ReportType.Xml)() } - def xmlCoberturaReport(): Command[Unit] = T.command { doReport(ReportType.XmlCobertura)() } - def consoleReport(): Command[Unit] = T.command { doReport(ReportType.Console)() } + def htmlReport(): Command[Unit] = Task.Command { doReport(ReportType.Html)() } + def xmlReport(): Command[Unit] = Task.Command { doReport(ReportType.Xml)() } + def xmlCoberturaReport(): Command[Unit] = Task.Command { doReport(ReportType.XmlCobertura)() } + def consoleReport(): Command[Unit] = Task.Command { doReport(ReportType.Console)() } override def skipIdea = true } @@ -232,7 +203,7 @@ trait ScoverageModule extends ScalaModule { outer: ScalaModule => * classes folder by the 
outer.scoverage classes folder and adding the * scoverage runtime dependency. */ - override def runClasspath: T[Seq[PathRef]] = T { + override def runClasspath: T[Seq[PathRef]] = Task { val outerClassesPath = outer.compile().classes val outerScoverageClassesPath = outer.scoverage.compile().classes (super.runClasspath().map { path => diff --git a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReport.scala b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReport.scala index a89de439996..e7c6ed5670f 100644 --- a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReport.scala +++ b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReport.scala @@ -1,6 +1,6 @@ package mill.contrib.scoverage -import mill.contrib.scoverage.api.ScoverageReportWorkerApi.ReportType +import mill.contrib.scoverage.api.ScoverageReportWorkerApi2.ReportType import mill.define.{Command, Module, Task} import mill.eval.Evaluator import mill.resolve.{Resolve, SelectMode} @@ -53,7 +53,7 @@ trait ScoverageReport extends Module { evaluator: Evaluator, sources: String = "__.allSources", dataTargets: String = "__.scoverage.data" - ): Command[PathRef] = T.command { + ): Command[PathRef] = Task.Command { reportTask(evaluator, ReportType.Html, sources, dataTargets)() } @@ -62,7 +62,7 @@ trait ScoverageReport extends Module { evaluator: Evaluator, sources: String = "__.allSources", dataTargets: String = "__.scoverage.data" - ): Command[PathRef] = T.command { + ): Command[PathRef] = Task.Command { reportTask(evaluator, ReportType.Xml, sources, dataTargets)() } @@ -71,7 +71,7 @@ trait ScoverageReport extends Module { evaluator: Evaluator, sources: String = "__.allSources", dataTargets: String = "__.scoverage.data" - ): Command[PathRef] = T.command { + ): Command[PathRef] = Task.Command { reportTask(evaluator, ReportType.XmlCobertura, sources, dataTargets)() } @@ -80,7 +80,7 @@ trait ScoverageReport extends Module { evaluator: Evaluator, sources: String = "__.allSources", dataTargets: String 
= "__.scoverage.data" - ): Command[PathRef] = T.command { + ): Command[PathRef] = Task.Command { reportTask(evaluator, ReportType.Console, sources, dataTargets)() } @@ -107,7 +107,7 @@ trait ScoverageReport extends Module { case Right(tasks) => tasks.asInstanceOf[Seq[Task[PathRef]]] } - T.task { + Task.Anon { val sourcePaths: Seq[Path] = T.sequence(sourcesTasks)().flatten.map(_.path) val dataPaths: Seq[Path] = T.sequence(dataTasks)().map(_.path) scoverageReportWorkerModule diff --git a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReportWorker.scala b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReportWorker.scala index e9fb93b2722..600c90d3bb9 100644 --- a/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReportWorker.scala +++ b/contrib/scoverage/src/mill/contrib/scoverage/ScoverageReportWorker.scala @@ -1,14 +1,19 @@ package mill.contrib.scoverage -import mill.{Agg, T} +import mill.{Agg, Task} import mill.api.{ClassLoader, Ctx, PathRef} -import mill.contrib.scoverage.api.ScoverageReportWorkerApi +import mill.contrib.scoverage.api.ScoverageReportWorkerApi2 import mill.define.{Discover, ExternalModule, Worker} +import ScoverageReportWorker.ScoverageReportWorkerApiBridge +import ScoverageReportWorkerApi2.ReportType +import ScoverageReportWorkerApi2.{Logger => ApiLogger} +import ScoverageReportWorkerApi2.{Ctx => ApiCtx} + class ScoverageReportWorker extends AutoCloseable { - private[this] var scoverageClCache = Option.empty[(Long, ClassLoader)] + private var scoverageClCache = Option.empty[(Long, ClassLoader)] - def bridge(classpath: Agg[PathRef])(implicit ctx: Ctx): ScoverageReportWorkerApi = { + def bridge(classpath: Agg[PathRef])(implicit ctx: Ctx): ScoverageReportWorkerApiBridge = { val classloaderSig = classpath.hashCode val cl = scoverageClCache match { @@ -24,11 +29,43 @@ class ScoverageReportWorker extends AutoCloseable { cl } - cl - .loadClass("mill.contrib.scoverage.worker.ScoverageReportWorkerImpl") - .getDeclaredConstructor() - 
.newInstance() - .asInstanceOf[api.ScoverageReportWorkerApi] + val worker = + cl + .loadClass("mill.contrib.scoverage.worker.ScoverageReportWorkerImpl") + .getDeclaredConstructor() + .newInstance() + .asInstanceOf[api.ScoverageReportWorkerApi2] + + def ctx0(implicit ctx: Ctx): ApiCtx = { + val logger = new ApiLogger { + def info(msg: String): Unit = ctx.log.info(msg) + def error(msg: String): Unit = ctx.log.error(msg) + def debug(msg: String): Unit = ctx.log.debug(msg) + } + new ApiCtx { + def log() = logger + def dest() = ctx.dest.toNIO + } + } + + new ScoverageReportWorkerApiBridge { + override def report( + reportType: ReportType, + sources: Seq[os.Path], + dataDirs: Seq[os.Path], + sourceRoot: os.Path + )(implicit + ctx: Ctx + ): Unit = { + worker.report( + reportType, + sources.map(_.toNIO).toArray, + dataDirs.map(_.toNIO).toArray, + sourceRoot.toNIO, + ctx0 + ) + } + } } override def close(): Unit = { @@ -37,8 +74,20 @@ class ScoverageReportWorker extends AutoCloseable { } object ScoverageReportWorker extends ExternalModule { + import ScoverageReportWorkerApi2.ReportType + + trait ScoverageReportWorkerApiBridge { + def report( + reportType: ReportType, + sources: Seq[os.Path], + dataDirs: Seq[os.Path], + sourceRoot: os.Path + )(implicit + ctx: Ctx + ): Unit + } def scoverageReportWorker: Worker[ScoverageReportWorker] = - T.worker { new ScoverageReportWorker() } + Task.Worker { new ScoverageReportWorker() } lazy val millDiscover: Discover = Discover[this.type] } diff --git a/contrib/scoverage/test/src/mill/contrib/scoverage/HelloWorldTests.scala b/contrib/scoverage/test/src/mill/contrib/scoverage/HelloWorldTests.scala index 887a3b66bb4..f2bd5744263 100644 --- a/contrib/scoverage/test/src/mill/contrib/scoverage/HelloWorldTests.scala +++ b/contrib/scoverage/test/src/mill/contrib/scoverage/HelloWorldTests.scala @@ -21,7 +21,7 @@ trait HelloWorldTests extends utest.TestSuite { def isScala3: Boolean = testScalaVersion.startsWith("3.") def isScov3: Boolean = 
testScoverageVersion.startsWith("2.") - val resourcePath = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "hello-world" + val resourcePath = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "hello-world" val sbtResourcePath = resourcePath / os.up / "hello-world-sbt" val unmanagedFile = resourcePath / "unmanaged.xml" @@ -302,18 +302,6 @@ trait FailedWorldTests extends HelloWorldTests { } } -object Scoverage1Tests_2_12 extends HelloWorldTests { - override def testScalaVersion: String = sys.props.getOrElse("TEST_SCALA_2_12_VERSION", ???) - override def testScoverageVersion = sys.props.getOrElse("MILL_SCOVERAGE_VERSION", ???) -} - -object Scoverage1Tests_2_13 extends HelloWorldTests { - // scaoverage 1.x was only released for Scala up to 2.13.8 - override def testScalaVersion: String = - sys.props.getOrElse("TEST_SCALA_2_13_VERSION_FOR_SCOVERAGE_1", ???) - override def testScoverageVersion = sys.props.getOrElse("MILL_SCOVERAGE_VERSION", ???) -} - object Scoverage2Tests_2_13 extends HelloWorldTests { override def testScalaVersion: String = sys.props.getOrElse("TEST_SCALA_2_13_VERSION", ???) override def testScoverageVersion = sys.props.getOrElse("MILL_SCOVERAGE2_VERSION", ???) @@ -323,10 +311,3 @@ object Scoverage2Tests_3_2 extends HelloWorldTests { override def testScalaVersion: String = sys.props.getOrElse("TEST_SCALA_3_2_VERSION", ???) override def testScoverageVersion = sys.props.getOrElse("MILL_SCOVERAGE2_VERSION", ???) } - -object Scoverage1Tests_3_2 extends FailedWorldTests { - override def testScalaVersion: String = sys.props.getOrElse("TEST_SCALA_3_2_VERSION", ???) - override def testScoverageVersion = sys.props.getOrElse("MILL_SCOVERAGE_VERSION", ???) - override val errorMsg = - "Scoverage 1.x does not support Scala 3. 
You have to update to at least Scala 3.2 and Scoverage 2.0" -} diff --git a/contrib/scoverage/worker/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala b/contrib/scoverage/worker/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala deleted file mode 100644 index 836c0387f17..00000000000 --- a/contrib/scoverage/worker/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala +++ /dev/null @@ -1,55 +0,0 @@ -package mill.contrib.scoverage.worker - -import mill.contrib.scoverage.api.ScoverageReportWorkerApi -import _root_.scoverage.report.{ - CoberturaXmlWriter, - CoverageAggregator, - ScoverageHtmlWriter, - ScoverageXmlWriter -} -import mill.api.Ctx -import mill.contrib.scoverage.api.ScoverageReportWorkerApi.ReportType - -/** - * Scoverage Worker for Scoverage 1.x - */ -class ScoverageReportWorkerImpl extends ScoverageReportWorkerApi { - - override def report( - reportType: ReportType, - sources: Seq[os.Path], - dataDirs: Seq[os.Path], - // ignored in Scoverage 1.x - sourceRoot: os.Path - )(implicit ctx: Ctx): Unit = - try { - ctx.log.info(s"Processing coverage data for ${dataDirs.size} data locations") - CoverageAggregator.aggregate(dataDirs.map(_.toIO)) match { - case Some(coverage) => - val sourceFolders = sources.map(_.toIO) - val folder = ctx.dest - os.makeDir.all(folder) - reportType match { - case ReportType.Html => - new ScoverageHtmlWriter(sourceFolders, folder.toIO, None) - .write(coverage) - case ReportType.Xml => - new ScoverageXmlWriter(sourceFolders, folder.toIO, false) - .write(coverage) - case ReportType.XmlCobertura => - new CoberturaXmlWriter(sourceFolders, folder.toIO) - .write(coverage) - case ReportType.Console => - ctx.log.info(s"Statement coverage.: ${coverage.statementCoverageFormatted}%") - ctx.log.info(s"Branch coverage....: ${coverage.branchCoverageFormatted}%") - } - case None => - ctx.log.error(s"No coverage data found in [${dataDirs.mkString(", ")}]") - } - } catch { - case e: Throwable => - 
ctx.log.error(s"Exception while building coverage report. ${e.getMessage()}") - e.printStackTrace() - throw e - } -} diff --git a/contrib/scoverage/worker2/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala b/contrib/scoverage/worker2/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala index a478a91c533..1393af627f1 100644 --- a/contrib/scoverage/worker2/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala +++ b/contrib/scoverage/worker2/src/mill/contrib/scoverage/worker/ScoverageReportWorkerImpl.scala @@ -1,42 +1,46 @@ package mill.contrib.scoverage.worker -import mill.contrib.scoverage.api.ScoverageReportWorkerApi import _root_.scoverage.reporter.{ CoberturaXmlWriter, CoverageAggregator, ScoverageHtmlWriter, ScoverageXmlWriter } -import mill.api.Ctx -import mill.contrib.scoverage.api.ScoverageReportWorkerApi.ReportType + +import mill.contrib.scoverage.api.ScoverageReportWorkerApi2 +import ScoverageReportWorkerApi2.ReportType +import ScoverageReportWorkerApi2.Ctx + +import java.nio.file.Path /** * Scoverage Worker for Scoverage 2.x */ -class ScoverageReportWorkerImpl extends ScoverageReportWorkerApi { +class ScoverageReportWorkerImpl extends ScoverageReportWorkerApi2 { override def report( reportType: ReportType, - sources: Seq[os.Path], - dataDirs: Seq[os.Path], - sourceRoot: os.Path - )(implicit ctx: Ctx): Unit = + sources: Array[Path], + dataDirs: Array[Path], + sourceRoot: Path, + ctx: Ctx + ): Unit = try { ctx.log.info(s"Processing coverage data for ${dataDirs.size} data locations") - CoverageAggregator.aggregate(dataDirs.map(_.toIO), sourceRoot.toIO) match { + CoverageAggregator.aggregate(dataDirs.map(_.toFile).toIndexedSeq, sourceRoot.toFile) match { case Some(coverage) => - val sourceFolders = sources.map(_.toIO) + val sourceFolders = sources.map(_.toFile).toIndexedSeq val folder = ctx.dest - os.makeDir.all(folder) + ScoverageReportWorkerApi2.makeAllDirs(folder) reportType match { case ReportType.Html => - new 
ScoverageHtmlWriter(sourceFolders, folder.toIO, None) + new ScoverageHtmlWriter(sourceFolders, folder.toFile, None) .write(coverage) case ReportType.Xml => - new ScoverageXmlWriter(sourceFolders, folder.toIO, false, None) + new ScoverageXmlWriter(sourceFolders, folder.toFile, false, None) .write(coverage) case ReportType.XmlCobertura => - new CoberturaXmlWriter(sourceFolders, folder.toIO, None) + new CoberturaXmlWriter(sourceFolders, folder.toFile, None) .write(coverage) case ReportType.Console => ctx.log.info(s"Statement coverage.: ${coverage.statementCoverageFormatted}%") diff --git a/contrib/sonatypecentral/readme.adoc b/contrib/sonatypecentral/readme.adoc index 9ee00bbe145..27ea99b07d4 100644 --- a/contrib/sonatypecentral/readme.adoc +++ b/contrib/sonatypecentral/readme.adoc @@ -29,7 +29,7 @@ $ mill -i \ mill.contrib.sonatypecentral.SonatypeCentralPublishModule/publishAll \ --username myusername \ --password mypassword \ ---gpgArgs --passphrase=$GPG_PASSPHRASE,--no-tty,--pinentry-mode,loopback,--batch,--yes,-a,-b \ +--gpgArgs --passphrase=$MILL_PGP_PASSPHRASE,--no-tty,--pinentry-mode,loopback,--batch,--yes,-a,-b \ --publishArtifacts __.publishArtifacts \ --readTimeout 36000 \ --awaitTimeout 36000 \ @@ -68,7 +68,7 @@ The `mill.contrib.sonatypecentral.SonatypeCentralPublishModule/publishAll` metho `password`: The password for calling the Sonatype Central publishing api. Defaults to the `SONATYPE_PASSWORD` environment variable if unset. If neither the parameter nor the environment variable are set, an error will be thrown. + -`gpgArgs`: Arguments to pass to the gpg package for signing artifacts. _Default: `--batch, --yes, -a, -b`._ + +`gpgArgs`: Arguments to pass to the gpg package for signing artifacts. Uses the `MILL_PGP_PASSPHRASE` environment variable if set. _Default: `[--passphrase=$MILL_PGP_PASSPHRASE], --no-tty, --pinentry-mode, loopback, --batch, --yes, -a, -b`._ + `publishArtifacts`: The command for generating all publishable artifacts (ex. 
`__.publishArtifacts`). Required. + diff --git a/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublishModule.scala b/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublishModule.scala index 7158c9bc87d..fd5fc26f386 100644 --- a/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublishModule.scala +++ b/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublishModule.scala @@ -13,7 +13,6 @@ import mill.contrib.sonatypecentral.SonatypeCentralPublishModule.{ getPublishingTypeFromReleaseFlag, getSonatypeCredentials } -import mill.scalalib.PublishModule.defaultGpgArgs import mill.scalalib.publish.Artifact import mill.scalalib.publish.SonatypeHelpers.{ PASSWORD_ENV_VARIABLE_NAME, @@ -21,21 +20,23 @@ import mill.scalalib.publish.SonatypeHelpers.{ } trait SonatypeCentralPublishModule extends PublishModule { - def sonatypeCentralGpgArgs: T[String] = T { defaultGpgArgs.mkString(",") } + def sonatypeCentralGpgArgs: T[String] = Task { + PublishModule.defaultGpgArgsForPassphrase(T.env.get("MILL_PGP_PASSPHRASE")).mkString(",") + } - def sonatypeCentralConnectTimeout: T[Int] = T { defaultConnectTimeout } + def sonatypeCentralConnectTimeout: T[Int] = Task { defaultConnectTimeout } - def sonatypeCentralReadTimeout: T[Int] = T { defaultReadTimeout } + def sonatypeCentralReadTimeout: T[Int] = Task { defaultReadTimeout } - def sonatypeCentralAwaitTimeout: T[Int] = T { defaultAwaitTimeout } + def sonatypeCentralAwaitTimeout: T[Int] = Task { defaultAwaitTimeout } - def sonatypeCentralShouldRelease: T[Boolean] = T { true } + def sonatypeCentralShouldRelease: T[Boolean] = Task { true } def publishSonatypeCentral( username: String = defaultCredentials, password: String = defaultCredentials ): define.Command[Unit] = - T.command { + Task.Command { val publishData = publishArtifacts() val fileMapping = publishData.withConcretePath._1 val artifact = publishData.meta @@ -43,10 +44,7 @@ trait 
SonatypeCentralPublishModule extends PublishModule { PublishModule.pgpImportSecretIfProvided(T.env) val publisher = new SonatypeCentralPublisher( credentials = finalCredentials, - gpgArgs = sonatypeCentralGpgArgs() match { - case "" => PublishModule.defaultGpgArgsForPassphrase(T.env.get("PGP_PASSPHRASE")) - case gpgArgs => gpgArgs.split(",").toIndexedSeq - }, + gpgArgs = sonatypeCentralGpgArgs().split(",").toIndexedSeq, connectTimeout = sonatypeCentralConnectTimeout(), readTimeout = sonatypeCentralReadTimeout(), log = T.log, @@ -75,12 +73,12 @@ object SonatypeCentralPublishModule extends ExternalModule { username: String = defaultCredentials, password: String = defaultCredentials, shouldRelease: Boolean = defaultShouldRelease, - gpgArgs: String = defaultGpgArgs.mkString(","), + gpgArgs: String = "", readTimeout: Int = defaultReadTimeout, connectTimeout: Int = defaultConnectTimeout, awaitTimeout: Int = defaultAwaitTimeout, bundleName: String = "" - ): Command[Unit] = T.command { + ): Command[Unit] = Task.Command { val artifacts: Seq[(Seq[(os.Path, String)], Artifact)] = T.sequence(publishArtifacts.value)().map { @@ -93,7 +91,7 @@ object SonatypeCentralPublishModule extends ExternalModule { val publisher = new SonatypeCentralPublisher( credentials = finalCredentials, gpgArgs = gpgArgs match { - case "" => PublishModule.defaultGpgArgsForPassphrase(T.env.get("PGP_PASSPHRASE")) + case "" => PublishModule.defaultGpgArgsForPassphrase(T.env.get("MILL_PGP_PASSPHRASE")) case gpgArgs => gpgArgs.split(",").toIndexedSeq }, connectTimeout = connectTimeout, @@ -122,7 +120,7 @@ object SonatypeCentralPublishModule extends ExternalModule { credentialParameterValue: String, credentialName: String, envVariableName: String - ): Task[String] = T.task { + ): Task[String] = Task.Anon { if (credentialParameterValue.nonEmpty) { Result.Success(credentialParameterValue) } else { @@ -141,7 +139,7 @@ object SonatypeCentralPublishModule extends ExternalModule { private def 
getSonatypeCredentials( usernameParameterValue: String, passwordParameterValue: String - ): Task[SonatypeCredentials] = T.task { + ): Task[SonatypeCredentials] = Task.Anon { val username = getSonatypeCredential(usernameParameterValue, "username", USERNAME_ENV_VARIABLE_NAME)() val password = diff --git a/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublisher.scala b/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublisher.scala index f8e7f0b3d29..edfa92c5a5c 100644 --- a/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublisher.scala +++ b/contrib/sonatypecentral/src/mill/contrib/sonatypecentral/SonatypeCentralPublisher.scala @@ -10,7 +10,7 @@ import mill.api.Logger import mill.scalalib.publish.Artifact import mill.scalalib.publish.SonatypeHelpers.getArtifactMappings -import java.io.FileOutputStream +import java.nio.file.Files import java.util.jar.JarOutputStream import java.util.zip.ZipEntry @@ -110,15 +110,15 @@ class SonatypeCentralPublisher( wd: os.Path )(func: JarOutputStream => Unit): java.io.File = { val zipFile = - (wd / s"$fileNameWithoutExtension.zip").toIO - val fileOutputStream = new FileOutputStream(zipFile) + (wd / s"$fileNameWithoutExtension.zip") + val fileOutputStream = Files.newOutputStream(zipFile.toNIO) val jarOutputStream = new JarOutputStream(fileOutputStream) try { func(jarOutputStream) } finally { jarOutputStream.close() } - zipFile + zipFile.toIO } private def zipFilesToJar( diff --git a/contrib/testng/readme.adoc b/contrib/testng/readme.adoc index f28d6b07b03..baa12bb3a77 100644 --- a/contrib/testng/readme.adoc +++ b/contrib/testng/readme.adoc @@ -2,7 +2,7 @@ :page-aliases: TestNG_TestFramework.adoc -Provides support for https://testng.org/doc/index.html[TestNG]. +Provides support for https://testng.org[TestNG]. To use TestNG as test framework, you need to add it to the `TestModule.testFramework` property. 
diff --git a/contrib/testng/src/mill/testng/ResultEvent.java b/contrib/testng/src/mill/testng/ResultEvent.java index 6e2a50d6954..46be47a63b2 100644 --- a/contrib/testng/src/mill/testng/ResultEvent.java +++ b/contrib/testng/src/mill/testng/ResultEvent.java @@ -1,45 +1,55 @@ - package mill.testng; -import sbt.testing.*; import org.testng.ITestResult; +import sbt.testing.*; public class ResultEvent { - static Event failure(ITestResult result){ return event(Status.Failure, result); } - static Event skipped(ITestResult result){ return event(Status.Skipped, result); } - static Event success(ITestResult result){ return event(Status.Success, result); } - - static Event event(Status result, ITestResult testNGResult) { - return new Event() { - public String fullyQualifiedName() { - return testNGResult.getTestClass().getName(); - } - - public Fingerprint fingerprint() { - return TestNGFingerprint.instance; - } - - public Selector selector() { - return new SuiteSelector(); - } - - public Status status() { - return result; - } - - public OptionalThrowable throwable() { - if (result != Status.Success){ - return new OptionalThrowable(testNGResult.getThrowable()); - }else { - return new OptionalThrowable(); - } - } - - @Override - public long duration() { - return testNGResult.getEndMillis() - testNGResult.getStartMillis(); - } - }; - } - static String classNameOf(ITestResult result){ return result.getTestClass().getName(); } -} \ No newline at end of file + static Event failure(ITestResult result) { + return event(Status.Failure, result); + } + + static Event skipped(ITestResult result) { + return event(Status.Skipped, result); + } + + static Event success(ITestResult result) { + return event(Status.Success, result); + } + + static Event event(Status result, ITestResult testNGResult) { + return new Event() { + public String fullyQualifiedName() { + return testNGResult.getTestClass().getName(); + } + + public Fingerprint fingerprint() { + return TestNGFingerprint.instance; + } + 
+ public Selector selector() { + return new SuiteSelector(); + } + + public Status status() { + return result; + } + + public OptionalThrowable throwable() { + if (result != Status.Success) { + return new OptionalThrowable(testNGResult.getThrowable()); + } else { + return new OptionalThrowable(); + } + } + + @Override + public long duration() { + return testNGResult.getEndMillis() - testNGResult.getStartMillis(); + } + }; + } + + static String classNameOf(ITestResult result) { + return result.getTestClass().getName(); + } +} diff --git a/contrib/testng/src/mill/testng/TestNGFramework.java b/contrib/testng/src/mill/testng/TestNGFramework.java index a5aeb286164..d388e78315f 100644 --- a/contrib/testng/src/mill/testng/TestNGFramework.java +++ b/contrib/testng/src/mill/testng/TestNGFramework.java @@ -7,23 +7,29 @@ public class TestNGFramework implements Framework { - public String name(){ return "TestNG"; } - - public Fingerprint[] fingerprints() { - return new Fingerprint[]{TestNGFingerprint.instance}; - } - - @Override - public Runner runner(String[] args, String[] remoteArgs, ClassLoader classLoader) { - return new TestNGRunner(args, remoteArgs, classLoader); - } + public String name() { + return "TestNG"; + } + + public Fingerprint[] fingerprints() { + return new Fingerprint[] {TestNGFingerprint.instance}; + } + + @Override + public Runner runner(String[] args, String[] remoteArgs, ClassLoader classLoader) { + return new TestNGRunner(args, remoteArgs, classLoader); + } } -class TestNGFingerprint implements AnnotatedFingerprint{ +class TestNGFingerprint implements AnnotatedFingerprint { - public static final TestNGFingerprint instance = new TestNGFingerprint(); + public static final TestNGFingerprint instance = new TestNGFingerprint(); - public String annotationName(){return "org.testng.annotations.Test";} + public String annotationName() { + return "org.testng.annotations.Test"; + } - public boolean isModule(){return false;} + public boolean isModule() { + return 
false; + } } diff --git a/contrib/testng/src/mill/testng/TestNGInstance.java b/contrib/testng/src/mill/testng/TestNGInstance.java index 709ad4281ed..48832dc675e 100644 --- a/contrib/testng/src/mill/testng/TestNGInstance.java +++ b/contrib/testng/src/mill/testng/TestNGInstance.java @@ -1,8 +1,6 @@ package mill.testng; - import java.util.Arrays; - import org.testng.CommandLineArgs; import org.testng.ITestContext; import org.testng.ITestListener; @@ -11,71 +9,70 @@ import sbt.testing.EventHandler; import sbt.testing.Logger; -class TestNGListener implements ITestListener{ +class TestNGListener implements ITestListener { - private final EventHandler basket; - private final boolean printEnabled; + private final EventHandler basket; + private final boolean printEnabled; - private String lastName = ""; + private String lastName = ""; - public TestNGListener(EventHandler basket){ - this.basket = basket; - String prop = System.getProperty("mill.testng.printProgress", "1"); - this.printEnabled = Arrays.asList("1", "y", "yes", "true").contains(prop); - } + public TestNGListener(EventHandler basket) { + this.basket = basket; + String prop = System.getProperty("mill.testng.printProgress", "1"); + this.printEnabled = Arrays.asList("1", "y", "yes", "true").contains(prop); + } - public void onTestStart(ITestResult iTestResult) { - String newName = iTestResult.getTestClass().getName() + " " + iTestResult.getName() + " "; - if(!newName.equals(lastName)){ - if (!lastName.equals("")){ - System.out.println(); - } - lastName = newName; - System.out.print(lastName); - } + public void onTestStart(ITestResult iTestResult) { + String newName = iTestResult.getTestClass().getName() + " " + iTestResult.getName() + " "; + if (!newName.equals(lastName)) { + if (!lastName.equals("")) { + System.out.println(); + } + lastName = newName; + System.out.print(lastName); } + } - public void onTestSuccess(ITestResult iTestResult) { - printProgress('+'); - basket.handle(ResultEvent.success(iTestResult)); - 
} + public void onTestSuccess(ITestResult iTestResult) { + printProgress('+'); + basket.handle(ResultEvent.success(iTestResult)); + } - public void onTestFailure(ITestResult iTestResult) { - printProgress('X'); - basket.handle(ResultEvent.failure(iTestResult)); - } + public void onTestFailure(ITestResult iTestResult) { + printProgress('X'); + basket.handle(ResultEvent.failure(iTestResult)); + } - public void onTestSkipped(ITestResult iTestResult) { - printProgress('-'); - basket.handle(ResultEvent.skipped(iTestResult)); - } + public void onTestSkipped(ITestResult iTestResult) { + printProgress('-'); + basket.handle(ResultEvent.skipped(iTestResult)); + } - public void onTestFailedButWithinSuccessPercentage(ITestResult iTestResult) { - basket.handle(ResultEvent.failure(iTestResult)); - } + public void onTestFailedButWithinSuccessPercentage(ITestResult iTestResult) { + basket.handle(ResultEvent.failure(iTestResult)); + } + + public void onStart(ITestContext iTestContext) {} - public void onStart(ITestContext iTestContext) {} + public void onFinish(ITestContext iTestContext) {} - public void onFinish(ITestContext iTestContext) {} - - protected void printProgress(char progress) { - if(printEnabled) { - System.out.print(progress); - } + protected void printProgress(char progress) { + if (printEnabled) { + System.out.print(progress); } + } } -public class TestNGInstance extends TestNG{ - public TestNGInstance(Logger[] loggers, - ClassLoader testClassLoader, - CommandLineArgs args, - EventHandler eventHandler) { - addClassLoader(testClassLoader); +public class TestNGInstance extends TestNG { + public TestNGInstance( + Logger[] loggers, + ClassLoader testClassLoader, + CommandLineArgs args, + EventHandler eventHandler) { + addClassLoader(testClassLoader); - this.addListener(new TestNGListener(eventHandler)); + this.addListener(new TestNGListener(eventHandler)); - configure(args); - } + configure(args); + } } - - diff --git a/contrib/testng/src/mill/testng/TestNGRunner.java 
b/contrib/testng/src/mill/testng/TestNGRunner.java index d577fe8606a..bd757ca4674 100644 --- a/contrib/testng/src/mill/testng/TestNGRunner.java +++ b/contrib/testng/src/mill/testng/TestNGRunner.java @@ -10,69 +10,65 @@ class TestNGTask implements Task { - private final TaskDef taskDef; - private final ClassLoader testClassLoader; - private final CommandLineArgs cliArgs; + private final TaskDef taskDef; + private final ClassLoader testClassLoader; + private final CommandLineArgs cliArgs; - public TestNGTask(TaskDef taskDef, - ClassLoader testClassLoader, - CommandLineArgs cliArgs){ - this.taskDef = taskDef; - this.testClassLoader = testClassLoader; - this.cliArgs = cliArgs; - } + public TestNGTask(TaskDef taskDef, ClassLoader testClassLoader, CommandLineArgs cliArgs) { + this.taskDef = taskDef; + this.testClassLoader = testClassLoader; + this.cliArgs = cliArgs; + } - @Override - public String[] tags() { - return new String[0]; - } + @Override + public String[] tags() { + return new String[0]; + } - @Override - public Task[] execute(EventHandler eventHandler, Logger[] loggers) { - new TestNGInstance( - loggers, - testClassLoader, - cliArgs, - eventHandler - ).run(); - return new Task[0]; - } + @Override + public Task[] execute(EventHandler eventHandler, Logger[] loggers) { + new TestNGInstance(loggers, testClassLoader, cliArgs, eventHandler).run(); + return new Task[0]; + } - @Override - public TaskDef taskDef() { - return taskDef; - } + @Override + public TaskDef taskDef() { + return taskDef; + } } public class TestNGRunner implements Runner { - private final ClassLoader testClassLoader; - private final String[] args; - private final String[] remoteArgs; + private final ClassLoader testClassLoader; + private final String[] args; + private final String[] remoteArgs; - public TestNGRunner(String[] args, String[] remoteArgs, ClassLoader testClassLoader) { - this.testClassLoader = testClassLoader; - this.args = args; - this.remoteArgs = remoteArgs; - } + public 
TestNGRunner(String[] args, String[] remoteArgs, ClassLoader testClassLoader) { + this.testClassLoader = testClassLoader; + this.args = args; + this.remoteArgs = remoteArgs; + } - public Task[] tasks(TaskDef[] taskDefs) { - CommandLineArgs cliArgs = new CommandLineArgs(); - new JCommander(cliArgs).parse(args); // args is an output parameter of the constructor! - if(cliArgs.testClass == null){ - String[] names = new String[taskDefs.length]; - for(int i = 0; i < taskDefs.length; i += 1){ - names[i] = taskDefs[i].fullyQualifiedName(); - } - cliArgs.testClass = String.join(",", names); - } - if (taskDefs.length == 0) return new Task[]{}; - else return new Task[]{new TestNGTask(taskDefs[0], testClassLoader, cliArgs)}; + public Task[] tasks(TaskDef[] taskDefs) { + Task[] returnTasks = new Task[taskDefs.length]; + for (int i = 0; i < taskDefs.length; i += 1) { + CommandLineArgs cliArgs = new CommandLineArgs(); + new JCommander(cliArgs).parse(args); // args is an output parameter of the constructor! 
+ cliArgs.testClass = taskDefs[i].fullyQualifiedName(); + returnTasks[i] = new TestNGTask(taskDefs[i], testClassLoader, cliArgs); } + return returnTasks; + } - public String done() { return null; } + public String done() { + return null; + } - public String[] remoteArgs() { return remoteArgs; } + public String[] remoteArgs() { + return remoteArgs; + } - public String[] args() { return args; } + public String[] args() { + return args; + } } diff --git a/contrib/testng/test/resources/demo/testng/src/foo/HelloTests.java b/contrib/testng/test/resources/demo/testng/src/foo/HelloTests.java new file mode 100644 index 00000000000..0404fda5c07 --- /dev/null +++ b/contrib/testng/test/resources/demo/testng/src/foo/HelloTests.java @@ -0,0 +1,14 @@ +package foo; + +import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; + +public class HelloTests { + + @Test + public void hello() throws Exception { + System.out.println("Testing Hello"); + Thread.sleep(1000); + System.out.println("Testing Hello Completed"); + } +} diff --git a/contrib/testng/test/resources/demo/testng/src/foo/WorldTests.java b/contrib/testng/test/resources/demo/testng/src/foo/WorldTests.java new file mode 100644 index 00000000000..b28be5508c0 --- /dev/null +++ b/contrib/testng/test/resources/demo/testng/src/foo/WorldTests.java @@ -0,0 +1,13 @@ +package foo; + +import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; + +public class WorldTests { + @Test + public void world() throws Exception { + System.out.println("Testing World"); + Thread.sleep(1000); + System.out.println("Testing World Completed"); + } +} diff --git a/contrib/testng/test/resources/demo/testngGrouping/src/foo/HelloTests.java b/contrib/testng/test/resources/demo/testngGrouping/src/foo/HelloTests.java new file mode 100644 index 00000000000..0404fda5c07 --- /dev/null +++ b/contrib/testng/test/resources/demo/testngGrouping/src/foo/HelloTests.java @@ -0,0 +1,14 @@ +package foo; + +import static 
org.testng.Assert.assertTrue; +import org.testng.annotations.Test; + +public class HelloTests { + + @Test + public void hello() throws Exception { + System.out.println("Testing Hello"); + Thread.sleep(1000); + System.out.println("Testing Hello Completed"); + } +} diff --git a/contrib/testng/test/resources/demo/testngGrouping/src/foo/WorldTests.java b/contrib/testng/test/resources/demo/testngGrouping/src/foo/WorldTests.java new file mode 100644 index 00000000000..b28be5508c0 --- /dev/null +++ b/contrib/testng/test/resources/demo/testngGrouping/src/foo/WorldTests.java @@ -0,0 +1,13 @@ +package foo; + +import static org.testng.Assert.assertTrue; +import org.testng.annotations.Test; + +public class WorldTests { + @Test + public void world() throws Exception { + System.out.println("Testing World"); + Thread.sleep(1000); + System.out.println("Testing World Completed"); + } +} diff --git a/contrib/testng/test/src/mill/testng/TestNGTests.scala b/contrib/testng/test/src/mill/testng/TestNGTests.scala index 2cab918689e..8a009d0cf65 100644 --- a/contrib/testng/test/src/mill/testng/TestNGTests.scala +++ b/contrib/testng/test/src/mill/testng/TestNGTests.scala @@ -13,7 +13,7 @@ object TestNGTests extends TestSuite { object demo extends TestBaseModule with JavaModule { object test extends JavaTests { - def testngClasspath = T { + def testngClasspath = Task { millProjectModule( "mill-contrib-testng", repositoriesTask(), @@ -21,41 +21,60 @@ object TestNGTests extends TestSuite { ) } - override def runClasspath: Target[Seq[PathRef]] = - T { super.runClasspath() ++ testngClasspath() } - override def ivyDeps = T { + override def runClasspath: T[Seq[PathRef]] = + Task { super.runClasspath() ++ testngClasspath() } + override def ivyDeps = Task { super.ivyDeps() ++ Agg( ivy"org.testng:testng:6.11", ivy"de.tototec:de.tobiasroeser.lambdatest:0.8.0" ) } - override def testFramework = T { + override def testFramework = Task { "mill.testng.TestNGFramework" } } - } + object testng extends 
JavaTests with TestModule.TestNg { + def ivyDeps = super.ivyDeps() ++ Agg( + ivy"org.testng:testng:7.10.2" + ) + } - val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) / "demo" + object testngGrouping extends JavaTests with TestModule.TestNg { + def ivyDeps = super.ivyDeps() ++ Agg( + ivy"org.testng:testng:7.10.2" + ) + def testForkGrouping = discoveredTestClasses().grouped(1).toSeq + } + } + val resourcePath: os.Path = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) / "demo" def tests: Tests = Tests { - test("TestNG") { - test("demo") - UnitTester(demo, resourcePath).scoped { eval => - val Right(result) = eval.apply(demo.test.testFramework) - assert( - result.value == "mill.testng.TestNGFramework", - result.evalCount > 0 - ) - } - test("Test case lookup from inherited annotations") - UnitTester(demo, resourcePath).scoped { - eval => - val Right(result) = eval.apply(demo.test.test()) - val tres = result.value.asInstanceOf[(String, Seq[mill.testrunner.TestResult])] - assert( - tres._2.size == 8 - ) - } + test("demo") - UnitTester(demo, resourcePath).scoped { eval => + val Right(result) = eval.apply(demo.test.testFramework) + assert( + result.value == "mill.testng.TestNGFramework", + result.evalCount > 0 + ) + } + test("Test case lookup from inherited annotations") - UnitTester(demo, resourcePath).scoped { + eval => + val Right(result) = eval.apply(demo.test.test()) + val tres = result.value + assert(tres._2.size == 8) + } + test("noGrouping") - UnitTester(demo, resourcePath).scoped { + eval => + val Right(result) = eval.apply(demo.testng.test()) + val tres = result.value._2 + assert(tres.map(_.fullyQualifiedName).toSet == Set("foo.HelloTests", "foo.WorldTests")) + } + test("testForkGrouping") - UnitTester(demo, resourcePath).scoped { + eval => + val Right(result) = eval.apply(demo.testngGrouping.test()) + val tres = result.value._2 + assert(tres.map(_.fullyQualifiedName).toSet == Set("foo.HelloTests", "foo.WorldTests")) } } } diff --git 
a/contrib/twirllib/readme.adoc b/contrib/twirllib/readme.adoc index e11cce77c85..622e881ec35 100644 --- a/contrib/twirllib/readme.adoc +++ b/contrib/twirllib/readme.adoc @@ -55,7 +55,7 @@ import $ivy.`com.lihaoyi::mill-contrib-twirllib:`, mill.twirllib._ object app extends ScalaModule with TwirlModule { def twirlVersion = "1.5.1" - def generatedSources = T{ Seq(compileTwirl().classes) } + def generatedSources = Task { Seq(compileTwirl().classes) } } ---- @@ -99,7 +99,7 @@ object app extends ScalaModule with TwirlModule { def twirlVersion = "1.5.1" def twirlScalaVersion = "2.13.8" override def twirlImports = super.twirlImports() ++ Seq("my.additional.stuff._", "my.other.stuff._") - def generatedSources = T{ Seq(compileTwirl().classes) } + def generatedSources = Task { Seq(compileTwirl().classes) } } // out.template.scala @@ -143,7 +143,7 @@ import $ivy.`com.lihaoyi::mill-contrib-twirllib:`, mill.twirllib._ object app extends ScalaModule with TwirlModule { def twirlVersion = "1.5.1" override def twirlFormats = super.twirlFormats() + Map("svg" -> "play.twirl.api.HtmlFormat") - def generatedSources = T{ Seq(compileTwirl().classes) } + def generatedSources = Task { Seq(compileTwirl().classes) } } ---- diff --git a/contrib/twirllib/src/mill/twirllib/TwirlModule.scala b/contrib/twirllib/src/mill/twirllib/TwirlModule.scala index cb0abe3343a..0c3269e78c2 100644 --- a/contrib/twirllib/src/mill/twirllib/TwirlModule.scala +++ b/contrib/twirllib/src/mill/twirllib/TwirlModule.scala @@ -18,14 +18,14 @@ trait TwirlModule extends mill.Module { twirlModule => * The Scala version matching the twirl version. 
* @since Mill after 0.10.5 */ - def twirlScalaVersion: T[String] = T { + def twirlScalaVersion: T[String] = Task { twirlVersion() match { case s"1.$minor.$_" if minor.toIntOption.exists(_ < 4) => BuildInfo.workerScalaVersion212 case _ => BuildInfo.scalaVersion } } - def twirlSources: T[Seq[PathRef]] = T.sources { + def twirlSources: T[Seq[PathRef]] = Task.Sources { millSourcePath / "views" } @@ -33,7 +33,7 @@ trait TwirlModule extends mill.Module { twirlModule => * Replicate the logic from twirl build, * see: https://github.com/playframework/twirl/blob/2.0.1/build.sbt#L12-L17 */ - private def scalaParserCombinatorsVersion: T[String] = twirlScalaVersion.map { + private def scalaParserCombinatorsVersion: Task[String] = twirlScalaVersion.map { case v if v.startsWith("2.") => "1.1.2" case _ => "2.3.0" } @@ -41,7 +41,7 @@ trait TwirlModule extends mill.Module { twirlModule => /** * @since Mill after 0.10.5 */ - def twirlIvyDeps: T[Agg[Dep]] = T { + def twirlIvyDeps: T[Agg[Dep]] = Task { Agg( if (twirlVersion().startsWith("1.")) ivy"com.typesafe.play::twirl-compiler:${twirlVersion()}" @@ -57,7 +57,7 @@ trait TwirlModule extends mill.Module { twirlModule => * @since Mill after 0.10.5 */ trait TwirlResolver extends CoursierModule { - override def resolveCoursierDependency: Task[Dep => Dependency] = T.task { d: Dep => + override def resolveCoursierDependency: Task[Dep => Dependency] = Task.Anon { (d: Dep) => Lib.depToDependency(d, twirlScalaVersion()) } @@ -72,11 +72,11 @@ trait TwirlModule extends mill.Module { twirlModule => */ lazy val twirlCoursierResolver: TwirlResolver = new TwirlResolver {} - def twirlClasspath: T[Loose.Agg[PathRef]] = T { + def twirlClasspath: T[Loose.Agg[PathRef]] = Task { twirlCoursierResolver.defaultResolver().resolveDeps(twirlIvyDeps()) } - def twirlImports: T[Seq[String]] = T { + def twirlImports: T[Seq[String]] = Task { TwirlWorkerApi.twirlWorker.defaultImports(twirlClasspath()) } @@ -88,7 +88,7 @@ trait TwirlModule extends mill.Module { 
twirlModule => def twirlInclusiveDot: Boolean = false - def compileTwirl: T[mill.scalalib.api.CompilationResult] = T.persistent { + def compileTwirl: T[mill.scalalib.api.CompilationResult] = Task(persistent = true) { TwirlWorkerApi.twirlWorker .compile( twirlClasspath(), diff --git a/contrib/twirllib/src/mill/twirllib/TwirlWorker.scala b/contrib/twirllib/src/mill/twirllib/TwirlWorker.scala index c9ba5888879..1c1f6cfa773 100644 --- a/contrib/twirllib/src/mill/twirllib/TwirlWorker.scala +++ b/contrib/twirllib/src/mill/twirllib/TwirlWorker.scala @@ -34,7 +34,7 @@ class TwirlWorker { // NOTE: When creating the cl classloader with passing the current classloader as the parent: // val cl = new URLClassLoader(twirlClasspath.map(_.toIO.toURI.toURL).toArray, getClass.getClassLoader) // it is possible to cast the default to a Seq[String], construct our own Seq[String], and pass it to the method invoke - - // classe will be compatible (the tests passed). + // classes will be compatible (the tests passed). // But when run in an actual mill project with this module enabled, there were exceptions like this: // scala.reflect.internal.MissingRequirementError: object scala in compiler mirror not found. 
diff --git a/contrib/twirllib/test/src/mill/twirllib/HelloWorldTests.scala b/contrib/twirllib/test/src/mill/twirllib/HelloWorldTests.scala index ef01ce49f2d..36b0b02c063 100644 --- a/contrib/twirllib/test/src/mill/twirllib/HelloWorldTests.scala +++ b/contrib/twirllib/test/src/mill/twirllib/HelloWorldTests.scala @@ -7,6 +7,7 @@ import utest.{TestSuite, Tests, assert, _} trait HelloWorldTests extends TestSuite { val testTwirlVersion: String + val wildcard: String trait HelloWorldModule extends mill.twirllib.TwirlModule { def twirlVersion = testTwirlVersion @@ -31,7 +32,7 @@ trait HelloWorldTests extends TestSuite { } - def resourcePath = os.Path(sys.env("MILL_TEST_RESOURCE_FOLDER")) + def resourcePath = os.Path(sys.env("MILL_TEST_RESOURCE_DIR")) def compileClassfiles: Seq[os.RelPath] = Seq[os.RelPath]( os.rel / "html/hello.template.scala", os.rel / "html/wrapper.template.scala", @@ -39,8 +40,8 @@ trait HelloWorldTests extends TestSuite { ) def expectedDefaultImports: Seq[String] = Seq( - "import _root_.play.twirl.api.TwirlFeatureImports._", - "import _root_.play.twirl.api.TwirlHelperImports._", + s"import _root_.play.twirl.api.TwirlFeatureImports.$wildcard", + s"import _root_.play.twirl.api.TwirlHelperImports.$wildcard", "import _root_.play.twirl.api.Html", "import _root_.play.twirl.api.JavaScript", "import _root_.play.twirl.api.Txt", @@ -48,8 +49,8 @@ trait HelloWorldTests extends TestSuite { ) def testAdditionalImports: Seq[String] = Seq( - "mill.twirl.test.AdditionalImport1._", - "mill.twirl.test.AdditionalImport2._" + s"mill.twirl.test.AdditionalImport1.$wildcard", + s"mill.twirl.test.AdditionalImport2.$wildcard" ) def testConstructorAnnotations = Seq( @@ -159,13 +160,17 @@ trait HelloWorldTests extends TestSuite { object HelloWorldTests1_3 extends HelloWorldTests { override val testTwirlVersion = "1.3.16" + override val wildcard = "_" } object HelloWorldTests1_5 extends HelloWorldTests { override val testTwirlVersion = "1.5.2" + override val wildcard = "_" } 
object HelloWorldTests1_6 extends HelloWorldTests { override val testTwirlVersion = "1.6.2" + override val wildcard = "_" } object HelloWorldTests2_0 extends HelloWorldTests { override val testTwirlVersion = "2.0.1" + override val wildcard = "_" } diff --git a/contrib/versionfile/readme.adoc b/contrib/versionfile/readme.adoc index bbe1b0c37da..a464814e6cf 100644 --- a/contrib/versionfile/readme.adoc +++ b/contrib/versionfile/readme.adoc @@ -4,8 +4,8 @@ This plugin provides helpers for updating a version file and committing the changes to git. -*Note: You can still make manual changes to the version file in-between execution of the targets provided by the module.* -*Each target operates on the version file as is at the time of execution.* +*Note: You can still make manual changes to the version file in-between execution of the tasks provided by the module.* +*Each task operates on the version file as is at the time of execution.* == Quickstart @@ -83,7 +83,7 @@ In this example, it would look for the file `version` in the same directory as t == Set release version -The `setReleaseVersion` target removes the `-SNAPSHOT` identifier from the version, +The `setReleaseVersion` task removes the `-SNAPSHOT` identifier from the version, then overwrites the previous content in the version file with this new version. === Example @@ -99,7 +99,7 @@ This will update the version file to contain `0.1.0`. == Set next version -The `setNextVersion` target bumps the version and changes it to a snapshot version, +The `setNextVersion` task bumps the version and changes it to a snapshot version, then overwrites the previous content in the version file with this new version. 
=== Parameters diff --git a/contrib/versionfile/src/mill/contrib/versionfile/VersionFileModule.scala b/contrib/versionfile/src/mill/contrib/versionfile/VersionFileModule.scala index e476e1383fe..e7f95fbd55a 100644 --- a/contrib/versionfile/src/mill/contrib/versionfile/VersionFileModule.scala +++ b/contrib/versionfile/src/mill/contrib/versionfile/VersionFileModule.scala @@ -5,33 +5,34 @@ import mill._ trait VersionFileModule extends Module { /** The file containing the current version. */ - def versionFile: T[PathRef] = T.source(millSourcePath / "version") + def versionFile: T[PathRef] = Task.Source(millSourcePath / "version") /** The current version. */ - def currentVersion: T[Version] = T { Version.of(os.read(versionFile().path).trim) } + def currentVersion: T[Version] = Task { Version.of(os.read(versionFile().path).trim) } /** The release version. */ - def releaseVersion: T[Version] = T { currentVersion().asRelease } + def releaseVersion: T[Version] = Task { currentVersion().asRelease } /** The next snapshot version. */ - def nextVersion(bump: String): Task[Version] = T.task { currentVersion().asSnapshot.bump(bump) } + def nextVersion(bump: String): Task[Version] = + Task.Anon { currentVersion().asSnapshot.bump(bump) } /** Writes the release version to file. */ - def setReleaseVersion(): Command[Unit] = T.command { + def setReleaseVersion(): Command[Unit] = Task.Command { setVersionTask(releaseVersion)() } /** Writes the next snapshot version to file. */ - def setNextVersion(bump: String): Command[Unit] = T.command { + def setNextVersion(bump: String): Command[Unit] = Task.Command { setVersionTask(nextVersion(bump))() } /** Writes the given version to file. 
*/ - def setVersion(version: Task[Version]): Command[Unit] = T.command { + def setVersion(version: Task[Version]): Command[Unit] = Task.Command { setVersionTask(version)() } - protected def setVersionTask(version: Task[Version]) = T.task { + protected def setVersionTask(version: Task[Version]) = Task.Anon { T.log.info(generateCommitMessage(version())) writeVersionToFile(versionFile(), version()) } @@ -43,7 +44,7 @@ trait VersionFileModule extends Module { ) /** Procs for tagging current version and committing changes. */ - def tag = T { + def tag = Task { Seq( os.proc("git", "commit", "-am", generateCommitMessage(currentVersion())), os.proc("git", "tag", currentVersion().toString) @@ -51,7 +52,7 @@ trait VersionFileModule extends Module { } /** Procs for committing changes and pushing. */ - def push = T { + def push = Task { Seq( os.proc("git", "commit", "-am", generateCommitMessage(currentVersion())), os.proc("git", "push", "origin", "master", "--tags") @@ -82,7 +83,7 @@ trait VersionFileModule extends Module { object VersionFileModule extends define.ExternalModule { /** Executes the given processes. 
*/ - def exec(procs: mill.main.Tasks[Seq[os.proc]]) = T.command { + def exec(procs: mill.main.Tasks[Seq[os.proc]]) = Task.Command { for { procs <- T.sequence(procs.value)() proc <- procs diff --git a/contrib/versionfile/test/src/mill/contrib/versionfile/VersionFileModuleTests.scala b/contrib/versionfile/test/src/mill/contrib/versionfile/VersionFileModuleTests.scala index bbbd23375e8..90e592be6bc 100644 --- a/contrib/versionfile/test/src/mill/contrib/versionfile/VersionFileModuleTests.scala +++ b/contrib/versionfile/test/src/mill/contrib/versionfile/VersionFileModuleTests.scala @@ -1,6 +1,6 @@ package mill.contrib.versionfile -import mill.T +import mill.{T, Task} import mill.testkit.{UnitTester, TestBaseModule} import utest.{TestSuite, Tests, assert, assertMatch, test} import utest.framework.TestPath @@ -108,7 +108,7 @@ object VersionFileModuleTests extends TestSuite { test("setVersion") - workspaceTest(versions: _*) { eval => val expected = Version.Release(1, 2, 4) - eval(TestModule.versionFile.setVersion(T.task(expected))) + eval(TestModule.versionFile.setVersion(Task.Anon(expected))) val Right(actual) = eval(TestModule.versionFile.currentVersion) assert(actual.value == expected) } diff --git a/dist/package.mill b/dist/package.mill new file mode 100644 index 00000000000..597c8359ff0 --- /dev/null +++ b/dist/package.mill @@ -0,0 +1,364 @@ +package build.dist +import mill._, scalalib._, publish._ +import mill.define.ModuleRef +import mill.util.Jvm +import mill.api.JarManifest +import de.tobiasroeser.mill.vcs.version.VcsVersion +import $file.ci.upload + +import scala.util.Using + +trait InstallModule extends build.MillPublishJavaModule{ + // All modules that we want to aggregate as part of this `dev` assembly. 
+ // Excluding itself, and the `dist` module that uses it + lazy val allPublishModules = build.millInternal.modules.collect { + case m: PublishModule if (m ne build.dist) && (m ne build.dist.native) => m + } + def moduleDeps = Seq(build.runner, build.idea, build.main.init) + + def jar: T[PathRef] + + def executable = Task{ + Task.traverse(allPublishModules)(m => m.publishLocalCached)() + jar() + } + + def localBinName: String + + /** + * Build and install Mill locally. + * + * @param binFile The location where the Mill binary should be installed + * @param ivyRepo The local Ivy repository where Mill modules should be published to + */ + def installLocal(binFile: String = localBinName, ivyRepo: String = null) = + Task.Command { + PathRef(installLocalTask(Task.Anon(binFile), ivyRepo)()) + } + + val batExt = if (scala.util.Properties.isWin) ".bat" else "" + + def installLocalCache() = Task.Command { + val path = installLocalTask( + Task.Anon((os.home / ".cache" / "mill" / "download" / (build.millVersion() + batExt)).toString()) + )() + Task.log.outputStream.println(path.toString()) + PathRef(path) + } + + def installLocalTask(binFile: Task[String], ivyRepo: String = null): Task[os.Path] = Task.Anon { + val targetFile = os.Path(binFile(), Task.workspace) + if (os.exists(targetFile)) + Task.log.info(s"Overwriting existing local Mill binary at ${targetFile}") + os.copy.over(executable().path, targetFile, createFolders = true) + Task.log.info(s"Published ${build.dist.allPublishModules.size} modules and installed ${targetFile}") + targetFile + } +} + +object `package` extends RootModule with InstallModule { + + /** + * Version of [[dist]] meant for local integration testing within the Mill + * repo. Looks mostly the same as [[dist]], except it does not have a reference + * to itself in its [[testTransitiveDeps]], to avoid a circular dependency. 
+ */ + object dist0 extends build.MillPublishJavaModule { + // disable scalafix here because it crashes when a module has no sources + def moduleDeps = Seq(build.runner, build.idea) + + def testTransitiveDeps = build.runner.testTransitiveDeps() ++ Seq( + build.main.graphviz.testDep(), + build.main.maven.testDep(), + build.runner.linenumbers.testDep(), + build.scalalib.backgroundwrapper.testDep(), + build.contrib.bloop.testDep(), + build.contrib.buildinfo.testDep(), + build.contrib.scoverage.testDep(), + build.contrib.scoverage.worker2.testDep(), + build.contrib.jmh.testDep(), + build.contrib.playlib.testDep(), + build.contrib.playlib.worker("2.8").testDep(), + build.contrib.testng.testDep(), + build.bsp.worker.testDep(), + build.testkit.testDep() + ) + } + + def testTransitiveDeps = dist0.testTransitiveDeps() ++ Seq( + (s"com.lihaoyi-${build.dist.artifactId()}", dist0.runClasspath().map(_.path).mkString("\n")) + ) + + def genTask(m: ScalaModule) = Task.Anon { Seq(m.jar(), m.sourceJar()) ++ m.runClasspath() } + + def forkArgs: T[Seq[String]] = Task { + val genIdeaArgs = + genTask(build.main.define)() ++ + genTask(build.main.eval)() ++ + genTask(build.main)() ++ + genTask(build.scalalib)() ++ + genTask(build.kotlinlib)() ++ + genTask(build.scalajslib)() ++ + genTask(build.scalanativelib)() ++ + genTask(build.javascriptlib)() ++ + genTask(build.pythonlib)() + + testArgs() ++ + Seq( + "-DMILL_CLASSPATH=" + runClasspath().map(_.path.toString).mkString(","), + "-DMILL_BUILD_LIBRARIES=" + genIdeaArgs.map(_.path).mkString(","), + s"-DBSP4J_VERSION=${build.Deps.bsp4j.dep.version}" + ) + } + + def localBinName = "mill-assembly.jar" + + def launcher = Task { + val isWin = scala.util.Properties.isWin + val outputPath = Task.dest / (if (isWin) "run.bat" else "run") + + os.write(outputPath, prependShellScript()) + if (!isWin) os.perms.set(outputPath, "rwxrwxrwx") + + PathRef(outputPath) + } + + def extraPublish: T[Seq[PublishInfo]] = Task { + Seq(PublishInfo(file = assembly(), 
classifier = Some("assembly"), ivyConfig = "compile")) + } + + def assemblyRules = super.assemblyRules ++ Seq( + mill.scalalib.Assembly.Rule.ExcludePattern("mill/local-test-overrides/.*") + ) + + def jar = Task { + val version = build.millVersion() + val devRunClasspath = runClasspath().map(_.path) + val filename = if (scala.util.Properties.isWin) "mill.bat" else "mill" + val commonArgs = Seq( + // Workaround for Zinc/JNA bug + // https://github.com/sbt/sbt/blame/6718803ee6023ab041b045a6988fafcfae9d15b5/main/src/main/scala/sbt/Main.scala#L130 + "-Djna.nosys=true" + ) + val shellArgs = Seq("-DMILL_CLASSPATH=$0") ++ commonArgs + val cmdArgs = Seq(""""-DMILL_CLASSPATH=%~dpnx0"""") ++ commonArgs + os.move( + mill.scalalib.Assembly.createAssembly( + devRunClasspath, + prependShellScript = launcherScript(shellArgs, cmdArgs, Agg("$0"), Agg("%~dpnx0")), + assemblyRules = assemblyRules + ).path, + Task.dest / filename + ) + PathRef(Task.dest / filename) + } + + def prependShellScript = Task { + val (millArgs, otherArgs) = + forkArgs().partition(arg => arg.startsWith("-DMILL") && !arg.startsWith("-DMILL_VERSION")) + // Pass Mill options via file, due to small max args limit in Windows + val vmOptionsFile = Task.dest / "mill.properties" + val millOptionsContent = + millArgs.map(_.drop(2).replace("\\", "/")).mkString( + "\r\n" + ) // drop -D prefix, replace \ with / + os.write(vmOptionsFile, millOptionsContent) + val jvmArgs = otherArgs ++ List(s"-DMILL_OPTIONS_PATH=$vmOptionsFile") + val classpath = runClasspath().map(_.path.toString) + val classpathJar = Task.dest / "classpath.jar" + Jvm.createClasspathPassingJar(classpathJar, runClasspath().map(_.path)) + + launcherScript( + jvmArgs, + jvmArgs, + classpath, + Agg(classpathJar.toString()) // TODO not working yet on Windows! 
see #791 + ) + } + + def run(args: Task[Args] = Task.Anon(Args())) = Task.Command(exclusive = true) { + args().value match { + case Nil => mill.api.Result.Failure("Need to pass in cwd as first argument to dist.run") + case wd0 +: rest => + val wd = os.Path(wd0, Task.workspace) + os.makeDir.all(wd) + try { + Jvm.runSubprocess( + Seq(launcher().path.toString) ++ rest, + forkEnv(), + workingDir = wd + ) + mill.api.Result.Success(()) + } catch { + case e: Throwable => + mill.api.Result.Failure(s"dist.run failed with an exception. ${e.getMessage()}") + } + } + } + def launcherScript( + shellJvmArgs: Seq[String], + cmdJvmArgs: Seq[String], + shellClassPath: Agg[String], + cmdClassPath: Agg[String] + ) = { + + val millMainClass = "mill.runner.client.MillClientMain" + + Jvm.universalScript( + shellCommands = { + val jvmArgsStr = shellJvmArgs.mkString(" ") + val classpathStr = shellClassPath.mkString(":") + + s"""if [ -z "$$JAVA_HOME" ] ; then + | JAVACMD="java" + |else + | JAVACMD="$$JAVA_HOME/bin/java" + |fi + | + |exec "$$JAVACMD" $jvmArgsStr $$JAVA_OPTS -cp "$classpathStr" $millMainClass "$$@" + |""".stripMargin + }, + cmdCommands = { + val jvmArgsStr = cmdJvmArgs.mkString(" ") + val classpathStr = cmdClassPath.mkString(";") + s"""setlocal EnableDelayedExpansion + |set "JAVACMD=java.exe" + |if not "%JAVA_HOME%"=="" set "JAVACMD=%JAVA_HOME%\\bin\\java.exe" + | + |"%JAVACMD%" $jvmArgsStr %JAVA_OPTS% -cp "$classpathStr" $millMainClass %* + | + |endlocal + |""".stripMargin + } + ) + } + + def millBootstrap = Task.Source(Task.workspace / "mill") + def millBootstrapBat = Task.Source(Task.workspace / "mill.bat") + + def prepareBootstrapLauncher(bootstrap: os.Path, dest: os.Path, buildVersion: String, fileName: String) = { + val outputPath = dest / fileName + val millBootstrapGrepPrefix = "(\n *DEFAULT_MILL_VERSION=)" + + os.write( + outputPath, + os.read(bootstrap) + .replaceAll( + millBootstrapGrepPrefix + "[^\\n]+", + "$1" + buildVersion + ) + ) + + if 
(!scala.util.Properties.isWin) os.perms.set(outputPath, "rwxrwxrwx") + + PathRef(outputPath) + } + def bootstrapLauncher = Task { + prepareBootstrapLauncher(millBootstrap().path, Task.dest, build.millVersion(), "mill") + } + def bootstrapLauncherBat = Task { + prepareBootstrapLauncher(millBootstrapBat().path, Task.dest, build.millVersion(), "mill.bat") + } + + def examplePathsWithArtifactName:Task[Seq[(os.Path,String)]] = Task.Anon{ + for { + exampleMod <- build.example.exampleModules + path = exampleMod.millSourcePath + } yield { + val example = path.subRelativeTo(Task.workspace) + val artifactName = build.millVersion() + "-" + example.segments.mkString("-") + (path, artifactName) + } + } + + + def exampleZips: T[Seq[PathRef]] = Task { + examplePathsWithArtifactName().map{ case (examplePath, exampleStr) => + os.copy(examplePath, Task.dest / exampleStr, createFolders = true) + os.write(Task.dest / exampleStr / ".mill-version", build.millLastTag()) + os.copy(bootstrapLauncher().path, Task.dest / exampleStr / "mill") + os.copy(bootstrapLauncherBat().path, Task.dest / exampleStr / "mill.bat") + val zip = Task.dest / s"$exampleStr.zip" + os.proc("zip", "-r", zip, exampleStr).call(cwd = Task.dest) + PathRef(zip) + } + } + + def uploadToGithub(authKey: String) = Task.Command { + val vcsState = VcsVersion.vcsState() + val label = vcsState.format() + if (label != build.millVersion()) sys.error("Modified mill version detected, aborting upload") + val releaseTag = vcsState.lastTag.getOrElse(sys.error( + "Incomplete git history. No tag found.\nIf on CI, make sure your git checkout job includes enough history." + )) + + if (releaseTag == label) { + // TODO: check if the tag already exists (e.g. 
because we created it manually) and do not fail + requests.post( + s"https://api.github.com/repos/${build.Settings.githubOrg}/${build.Settings.githubRepo}/releases", + data = ujson.Obj("tag_name" -> releaseTag, "name" -> releaseTag, "prerelease" -> true), + headers = Seq("Authorization" -> ("token " + authKey)) + ) + } + + val examples = exampleZips().map(z => (z.path, z.path.last)) + + val zips = examples ++ Seq( + (build.dist.assembly().path, label + "-assembly"), + (bootstrapLauncher().path, label), + (bootstrapLauncherBat().path, label + ".bat") + ) + + for ((zip, name) <- zips) { + upload.apply( + zip, + releaseTag, + name, + authKey, + build.Settings.githubOrg, + build.Settings.githubRepo + ) + } + } + + object native extends mill.scalalib.NativeImageModule with InstallModule { + def artifactOsSuffix = T{ + val osName = System.getProperty("os.name").toLowerCase + if (osName.contains("mac")) "mac" + else if (osName.contains("windows")) "windows" + else "linux" + } + + def artifactCpuSuffix = T{ System.getProperty("os.arch") } + def artifactName = s"${super.artifactName()}-${artifactOsSuffix()}-${artifactCpuSuffix()}" + + def mainClass = Some("mill.runner.client.MillClientMain") + + def nativeImageClasspath = build.runner.client.runClasspath() + + def localBinName = "mill-native" + + def jar = Task { + val previous = nativeImage().path + val executable = Task.dest / previous.baseName + + Using(os.write.outputStream(executable)) { out => + out.write(os.read.bytes(previous)) + out.write(System.lineSeparator.getBytes) + out.write(os.read.bytes(assembly().path)) + } + + if (!mill.main.client.Util.isWindows) os.perms.set(executable, "rwxrwxrwx") + + PathRef(executable) + } + + def nativeImageOptions = Seq("--no-fallback", "--enable-url-protocols=https") + + def zincWorker = ModuleRef(ZincWorkerGraalvm) + + object ZincWorkerGraalvm extends ZincWorkerModule { + def jvmId = build.Settings.graalvmJvmId + } + } +} diff --git 
a/dist/resources/META-INF/native-image/reachability-metadata.json b/dist/resources/META-INF/native-image/reachability-metadata.json new file mode 100644 index 00000000000..945749cdad1 --- /dev/null +++ b/dist/resources/META-INF/native-image/reachability-metadata.json @@ -0,0 +1,15 @@ +{ + "reflection": [ + { + "type": "mill.runner.MillMain" + }, + { + "type": "mill.runner.MillServerMain" + } + ], + "resources": [ + { + "glob": "logback.xml" + } + ] +} \ No newline at end of file diff --git a/dist/resources/logback.xml b/dist/resources/logback.xml new file mode 100644 index 00000000000..bd38360c7f0 --- /dev/null +++ b/dist/resources/logback.xml @@ -0,0 +1,11 @@ + + + out/mill.log + + %date %level [%thread] %logger{36} %msg%n + + + + + + diff --git a/docs/modules/ROOT/images/ChromeTracing.png b/docs/modules/ROOT/images/basic/ChromeTracing.png similarity index 100% rename from docs/modules/ROOT/images/ChromeTracing.png rename to docs/modules/ROOT/images/basic/ChromeTracing.png diff --git a/docs/modules/ROOT/images/IntellijApp.png b/docs/modules/ROOT/images/basic/IntellijApp.png similarity index 100% rename from docs/modules/ROOT/images/IntellijApp.png rename to docs/modules/ROOT/images/basic/IntellijApp.png diff --git a/docs/modules/ROOT/images/IntellijBuild.png b/docs/modules/ROOT/images/basic/IntellijBuild.png similarity index 100% rename from docs/modules/ROOT/images/IntellijBuild.png rename to docs/modules/ROOT/images/basic/IntellijBuild.png diff --git a/docs/modules/ROOT/images/basic/IntellijFileTypeConfig.png b/docs/modules/ROOT/images/basic/IntellijFileTypeConfig.png new file mode 100644 index 00000000000..439d6c78ea5 Binary files /dev/null and b/docs/modules/ROOT/images/basic/IntellijFileTypeConfig.png differ diff --git a/docs/modules/ROOT/images/IntellijRefresh.png b/docs/modules/ROOT/images/basic/IntellijRefresh.png similarity index 100% rename from docs/modules/ROOT/images/IntellijRefresh.png rename to docs/modules/ROOT/images/basic/IntellijRefresh.png diff 
--git a/docs/modules/ROOT/images/basic/IntellijSelectBsp.png b/docs/modules/ROOT/images/basic/IntellijSelectBsp.png new file mode 100644 index 00000000000..73e383b3ffa Binary files /dev/null and b/docs/modules/ROOT/images/basic/IntellijSelectBsp.png differ diff --git a/docs/modules/ROOT/images/VSCodeApp.png b/docs/modules/ROOT/images/basic/VSCodeApp.png similarity index 100% rename from docs/modules/ROOT/images/VSCodeApp.png rename to docs/modules/ROOT/images/basic/VSCodeApp.png diff --git a/docs/modules/ROOT/images/VSCodeBuild.png b/docs/modules/ROOT/images/basic/VSCodeBuild.png similarity index 100% rename from docs/modules/ROOT/images/VSCodeBuild.png rename to docs/modules/ROOT/images/basic/VSCodeBuild.png diff --git a/docs/modules/ROOT/images/VSCodeRefresh.png b/docs/modules/ROOT/images/basic/VSCodeRefresh.png similarity index 100% rename from docs/modules/ROOT/images/VSCodeRefresh.png rename to docs/modules/ROOT/images/basic/VSCodeRefresh.png diff --git a/docs/modules/ROOT/images/basic/VisualizeCompiles.svg b/docs/modules/ROOT/images/basic/VisualizeCompiles.svg new file mode 100644 index 00000000000..0bcc404a14f --- /dev/null +++ b/docs/modules/ROOT/images/basic/VisualizeCompiles.svg @@ -0,0 +1,49 @@ + + +example1 + + + +bar.compile + +bar.compile + + + +bar.test.compile + +bar.test.compile + + + +bar.compile->bar.test.compile + + + + + +foo.compile + +foo.compile + + + +bar.compile->foo.compile + + + + + +foo.test.compile + +foo.test.compile + + + +foo.compile->foo.test.compile + + + + + diff --git a/docs/modules/ROOT/images/VisualizeJava.svg b/docs/modules/ROOT/images/basic/VisualizeJava.svg similarity index 100% rename from docs/modules/ROOT/images/VisualizeJava.svg rename to docs/modules/ROOT/images/basic/VisualizeJava.svg diff --git a/docs/modules/ROOT/images/VisualizePlanJava.svg b/docs/modules/ROOT/images/basic/VisualizePlanJava.svg similarity index 100% rename from docs/modules/ROOT/images/VisualizePlanJava.svg rename to 
docs/modules/ROOT/images/basic/VisualizePlanJava.svg diff --git a/docs/modules/ROOT/images/VisualizePlanScala.svg b/docs/modules/ROOT/images/basic/VisualizePlanScala.svg similarity index 100% rename from docs/modules/ROOT/images/VisualizePlanScala.svg rename to docs/modules/ROOT/images/basic/VisualizePlanScala.svg diff --git a/docs/modules/ROOT/images/basic/VisualizeTestDeps.svg b/docs/modules/ROOT/images/basic/VisualizeTestDeps.svg new file mode 100644 index 00000000000..5bf2abaecde --- /dev/null +++ b/docs/modules/ROOT/images/basic/VisualizeTestDeps.svg @@ -0,0 +1,55 @@ + + +example1 + + + +baz.compile + +baz.compile + + + +baz.test.compile + +baz.test.compile + + + +baz.compile->baz.test.compile + + + + + +qux.compile + +qux.compile + + + +baz.compile->qux.compile + + + + + +qux.test.compile + +qux.test.compile + + + +baz.test.compile->qux.test.compile + + + + + +qux.compile->qux.test.compile + + + + + diff --git a/docs/modules/ROOT/images/GatlingCompileGraph.svg b/docs/modules/ROOT/images/comparisons/GatlingCompileGraph.svg similarity index 100% rename from docs/modules/ROOT/images/GatlingCompileGraph.svg rename to docs/modules/ROOT/images/comparisons/GatlingCompileGraph.svg diff --git a/docs/modules/ROOT/images/GatlingCompileProfile.png b/docs/modules/ROOT/images/comparisons/GatlingCompileProfile.png similarity index 100% rename from docs/modules/ROOT/images/GatlingCompileProfile.png rename to docs/modules/ROOT/images/comparisons/GatlingCompileProfile.png diff --git a/docs/modules/ROOT/images/IntellijGatlingMillPlugin1.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingMillPlugin1.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingMillPlugin1.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingMillPlugin1.png diff --git a/docs/modules/ROOT/images/IntellijGatlingMillPlugin2.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingMillPlugin2.png similarity index 100% rename from 
docs/modules/ROOT/images/IntellijGatlingMillPlugin2.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingMillPlugin2.png diff --git a/docs/modules/ROOT/images/IntellijGatlingMillTask1.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask1.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingMillTask1.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask1.png diff --git a/docs/modules/ROOT/images/IntellijGatlingMillTask2.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask2.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingMillTask2.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask2.png diff --git a/docs/modules/ROOT/images/IntellijGatlingMillTask3.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask3.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingMillTask3.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingMillTask3.png diff --git a/docs/modules/ROOT/images/IntellijGatlingSbtPlugin1.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingSbtPlugin1.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingSbtPlugin1.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingSbtPlugin1.png diff --git a/docs/modules/ROOT/images/IntellijGatlingSbtPlugin2.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingSbtPlugin2.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingSbtPlugin2.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingSbtPlugin2.png diff --git a/docs/modules/ROOT/images/IntellijGatlingSbtTask1.png b/docs/modules/ROOT/images/comparisons/IntellijGatlingSbtTask1.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingSbtTask1.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingSbtTask1.png diff --git a/docs/modules/ROOT/images/IntellijGatlingSbtTask2.png 
b/docs/modules/ROOT/images/comparisons/IntellijGatlingSbtTask2.png similarity index 100% rename from docs/modules/ROOT/images/IntellijGatlingSbtTask2.png rename to docs/modules/ROOT/images/comparisons/IntellijGatlingSbtTask2.png diff --git a/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClasses.png b/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClasses.png new file mode 100644 index 00000000000..d0e76264866 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClasses.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClassesDefinition.png b/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClassesDefinition.png new file mode 100644 index 00000000000..ec98b4d192f Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijGradleResourcesClassesDefinition.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions.png new file mode 100644 index 00000000000..ab37d9a6313 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions2.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions2.png new file mode 100644 index 00000000000..0deb22557b7 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoGradleCompileOptions2.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompile.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompile.png new file mode 100644 index 00000000000..d5aa21e1d7c Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompile.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileClasspath.png 
b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileClasspath.png new file mode 100644 index 00000000000..d8e6df5459f Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileClasspath.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileUsages.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileUsages.png new file mode 100644 index 00000000000..ea7d5da7ba4 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillCompileUsages.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDef.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDef.png new file mode 100644 index 00000000000..21932299979 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDef.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDocs.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDocs.png new file mode 100644 index 00000000000..09973c6a367 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsDocs.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsParents.png b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsParents.png new file mode 100644 index 00000000000..98f82592fab Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijMockitoMillJavacOptionsParents.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijNettyAutocomplete.png b/docs/modules/ROOT/images/comparisons/IntellijNettyAutocomplete.png new file mode 100644 index 00000000000..1b94538f236 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijNettyAutocomplete.png differ diff --git a/docs/modules/ROOT/images/comparisons/IntellijNettyPeekDocs.png 
b/docs/modules/ROOT/images/comparisons/IntellijNettyPeekDocs.png new file mode 100644 index 00000000000..905250e2070 Binary files /dev/null and b/docs/modules/ROOT/images/comparisons/IntellijNettyPeekDocs.png differ diff --git a/docs/modules/ROOT/images/MockitoCompileGraph.svg b/docs/modules/ROOT/images/comparisons/MockitoCompileGraph.svg similarity index 100% rename from docs/modules/ROOT/images/MockitoCompileGraph.svg rename to docs/modules/ROOT/images/comparisons/MockitoCompileGraph.svg diff --git a/docs/modules/ROOT/images/MockitoCompileProfile.png b/docs/modules/ROOT/images/comparisons/MockitoCompileProfile.png similarity index 100% rename from docs/modules/ROOT/images/MockitoCompileProfile.png rename to docs/modules/ROOT/images/comparisons/MockitoCompileProfile.png diff --git a/docs/modules/ROOT/images/NettyCompileGraph.svg b/docs/modules/ROOT/images/comparisons/NettyCompileGraph.svg similarity index 100% rename from docs/modules/ROOT/images/NettyCompileGraph.svg rename to docs/modules/ROOT/images/comparisons/NettyCompileGraph.svg diff --git a/docs/modules/ROOT/images/NettyCompileProfile.png b/docs/modules/ROOT/images/comparisons/NettyCompileProfile.png similarity index 100% rename from docs/modules/ROOT/images/NettyCompileProfile.png rename to docs/modules/ROOT/images/comparisons/NettyCompileProfile.png diff --git a/docs/modules/ROOT/images/index/gradle.svg b/docs/modules/ROOT/images/index/gradle.svg new file mode 100644 index 00000000000..8e7431e8230 --- /dev/null +++ b/docs/modules/ROOT/images/index/gradle.svg @@ -0,0 +1,19 @@ + + + diff --git a/docs/modules/ROOT/images/index/iconscout-java.svg b/docs/modules/ROOT/images/index/iconscout-java.svg new file mode 100644 index 00000000000..acf25c47343 --- /dev/null +++ b/docs/modules/ROOT/images/index/iconscout-java.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/docs/modules/ROOT/images/index/iconscout-kotlin.svg b/docs/modules/ROOT/images/index/iconscout-kotlin.svg new file mode 100644 index 
00000000000..48ff1667158 --- /dev/null +++ b/docs/modules/ROOT/images/index/iconscout-kotlin.svg @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/docs/modules/ROOT/images/index/iconscout-scala.svg b/docs/modules/ROOT/images/index/iconscout-scala.svg new file mode 100644 index 00000000000..279070c5eec --- /dev/null +++ b/docs/modules/ROOT/images/index/iconscout-scala.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + diff --git a/docs/modules/ROOT/images/index/maven.png b/docs/modules/ROOT/images/index/maven.png new file mode 100644 index 00000000000..5b57ab516c9 Binary files /dev/null and b/docs/modules/ROOT/images/index/maven.png differ diff --git a/docs/modules/ROOT/images/index/sbt.png b/docs/modules/ROOT/images/index/sbt.png new file mode 100644 index 00000000000..d684bbca7d1 Binary files /dev/null and b/docs/modules/ROOT/images/index/sbt.png differ diff --git a/docs/modules/ROOT/images/unique/IntellijDefinition.png b/docs/modules/ROOT/images/unique/IntellijDefinition.png new file mode 100644 index 00000000000..18a92dc78d1 Binary files /dev/null and b/docs/modules/ROOT/images/unique/IntellijDefinition.png differ diff --git a/docs/modules/ROOT/images/unique/IntellijDocs.png b/docs/modules/ROOT/images/unique/IntellijDocs.png new file mode 100644 index 00000000000..589910d921e Binary files /dev/null and b/docs/modules/ROOT/images/unique/IntellijDocs.png differ diff --git a/docs/modules/ROOT/images/unique/IntellijError.png b/docs/modules/ROOT/images/unique/IntellijError.png new file mode 100644 index 00000000000..62d6758249d Binary files /dev/null and b/docs/modules/ROOT/images/unique/IntellijError.png differ diff --git a/docs/modules/ROOT/images/unique/IntellijOverride.png b/docs/modules/ROOT/images/unique/IntellijOverride.png new file mode 100644 index 00000000000..b46b5898b9f Binary files /dev/null and b/docs/modules/ROOT/images/unique/IntellijOverride.png differ diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 
140ce82da77..d9e659bbd4a 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -3,81 +3,118 @@ // but we intentionally skim over them and do not go into depth: the focus is // on end user goals and how to achieve them. -.xref:Java_Intro_to_Mill.adoc[] -* xref:Java_Installation_IDE_Support.adoc[] -* xref:Java_Builtin_Commands.adoc[] -* xref:Java_Module_Config.adoc[] -* xref:Java_Build_Examples.adoc[] -* xref:Testing_Java_Projects.adoc[] -* xref:Linting_Java_Projects.adoc[] -// * xref:Publishing_Java_Projects.adoc[] -* xref:Java_Web_Examples.adoc[] -* xref:Case_Study_Mill_vs_Maven.adoc[] -* xref:Case_Study_Mill_vs_Gradle.adoc[] - -.xref:Scala_Intro_to_Mill.adoc[] -* xref:Scala_Installation_IDE_Support.adoc[] -* xref:Scala_Builtin_Commands.adoc[] -* xref:Scala_Module_Config.adoc[] -* xref:Scala_Build_Examples.adoc[] -* xref:Testing_Scala_Projects.adoc[] -// * xref:Publishing_Scala_Projects.adoc[] -* xref:Scala_Web_Examples.adoc[] -* xref:Case_Study_Mill_vs_SBT.adoc[] - -// This section is all about developing a deeper understanding of specific -// topics in Mill. This is the opposite of `Quick Start` above: while we touch -// on some end-user use cases, it is only to motivate the Mill features that we -// want to present to the reader. The focus is on Mill's design and -// functionality. 
-.Mill In Depth -* xref:Library_Dependencies.adoc[] -* xref:Out_Dir.adoc[] -* xref:Tasks.adoc[] -* xref:Modules.adoc[] -* xref:Cross_Builds.adoc[] -* xref:Target_Query_Syntax.adoc[] -* xref:Structuring_Large_Builds.adoc[] - -* xref:The_Mill_Evaluation_Model.adoc[] -* xref:Mill_Sandboxing.adoc[] - +* xref:javalib/intro.adoc[] +** xref:javalib/module-config.adoc[] +** xref:javalib/dependencies.adoc[] +** xref:javalib/testing.adoc[] +** xref:javalib/linting.adoc[] +** xref:javalib/publishing.adoc[] +** xref:javalib/build-examples.adoc[] +** xref:javalib/web-examples.adoc[] +* xref:scalalib/intro.adoc[] +** xref:scalalib/module-config.adoc[] +** xref:scalalib/dependencies.adoc[] +** xref:scalalib/testing.adoc[] +** xref:scalalib/linting.adoc[] +** xref:scalalib/publishing.adoc[] +** xref:scalalib/build-examples.adoc[] +** xref:scalalib/web-examples.adoc[] +** xref:scalalib/native-examples.adoc[] +* xref:kotlinlib/intro.adoc[] +** xref:kotlinlib/module-config.adoc[] +** xref:kotlinlib/dependencies.adoc[] +** xref:kotlinlib/testing.adoc[] +** xref:kotlinlib/linting.adoc[] +** xref:kotlinlib/publishing.adoc[] +// ** xref:kotlinlib/build-examples.adoc[] +** xref:kotlinlib/web-examples.adoc[] +* Experimental Platform Support +** Building Android Apps +*** xref:android/java.adoc[] +*** xref:android/kotlin.adoc[] +*** xref:android/android-linting.adoc[] +** xref:pythonlib/intro.adoc[] +*** xref:pythonlib/module-config.adoc[] +*** xref:pythonlib/dependencies.adoc[] +*** xref:pythonlib/publishing.adoc[] +*** xref:pythonlib/web-examples.adoc[] +*** xref:pythonlib/testing.adoc[] +** xref:javascriptlib/intro.adoc[] +*** xref:javascriptlib/dependencies.adoc[] +*** xref:javascriptlib/module-config.adoc[] +*** xref:javascriptlib/testing.adoc[] +* xref:comparisons/why-mill.adoc[] +** xref:comparisons/maven.adoc[] +** xref:comparisons/gradle.adoc[] +** xref:comparisons/sbt.adoc[] +** xref:comparisons/unique.adoc[] +* The Mill CLI +** xref:cli/installation-ide.adoc[] +** 
xref:cli/flags.adoc[] +** xref:cli/builtin-commands.adoc[] +** xref:cli/query-syntax.adoc[] +* xref:migrating/migrating.adoc[] +** xref:migrating/maven.adoc[] +// This section gives a tour of the various user-facing features of Mill: +// library deps, out folder, queries, tasks, etc.. These are things that +// every Mill user will likely encounter, and are touched upon in the various +// language-specific sections, but here we go into a deeper language-agnostic +// discussion of what these Mill features are and how they work +* Mill Fundamentals +** xref:fundamentals/tasks.adoc[] +** xref:fundamentals/modules.adoc[] +** xref:fundamentals/out-dir.adoc[] +** xref:fundamentals/library-deps.adoc[] +** xref:fundamentals/cross-builds.adoc[] +** xref:fundamentals/bundled-libraries.adoc[] +** xref:fundamentals/configuring-jvm-versions.adoc[] // This section talks about Mill plugins. While it could theoretically fit in // either section above, it is probably an important enough topic it is worth // breaking out on its own -.Extending Mill -* xref:Import_File_And_Import_Ivy.adoc[] -* xref:Using_Plugins.adoc[] -* xref:Contrib_Plugins.adoc[] +* Extending Mill +** xref:extending/import-ivy-plugins.adoc[] +** xref:extending/contrib-plugins.adoc[] // See also the list in Contrib_Plugins.adoc -** xref:contrib/artifactory.adoc[] -** xref:contrib/bintray.adoc[] -** xref:contrib/bloop.adoc[] -** xref:contrib/buildinfo.adoc[] -** xref:contrib/codeartifact.adoc[] -** xref:contrib/docker.adoc[] -** xref:contrib/errorprone.adoc[] -** xref:contrib/checkstyle.adoc[] -** xref:contrib/flyway.adoc[] -** xref:contrib/gitlab.adoc[] -** xref:contrib/jmh.adoc[] -** xref:contrib/playlib.adoc[] -** xref:contrib/proguard.adoc[] -** xref:contrib/scalapblib.adoc[] -** xref:contrib/scoverage.adoc[] -** xref:contrib/sonatypecentral.adoc[] -** xref:contrib/testng.adoc[] -** xref:contrib/twirllib.adoc[] -** xref:contrib/versionfile.adoc[] -* xref:Thirdparty_Plugins.adoc[] -* 
xref:Writing_Mill_Plugins.adoc[] -* xref:The_Mill_Meta_Build.adoc[] +*** xref:contrib/artifactory.adoc[] +*** xref:contrib/bintray.adoc[] +*** xref:contrib/bloop.adoc[] +*** xref:contrib/buildinfo.adoc[] +*** xref:contrib/codeartifact.adoc[] +*** xref:contrib/docker.adoc[] +*** xref:contrib/flyway.adoc[] +*** xref:contrib/gitlab.adoc[] +*** xref:contrib/jmh.adoc[] +*** xref:contrib/playlib.adoc[] +*** xref:contrib/proguard.adoc[] +*** xref:contrib/scalapblib.adoc[] +*** xref:contrib/scoverage.adoc[] +*** xref:contrib/sonatypecentral.adoc[] +*** xref:contrib/testng.adoc[] +*** xref:contrib/twirllib.adoc[] +*** xref:contrib/versionfile.adoc[] +** xref:extending/thirdparty-plugins.adoc[] +** xref:extending/running-jvm-code.adoc[] +** xref:extending/writing-plugins.adoc[] +** xref:extending/meta-build.adoc[] +** xref:extending/example-typescript-support.adoc[] +** xref:extending/example-python-support.adoc[] +// This section focuses on diving into deeper, more advanced topics for Mill. +// These are things that most Mill developers would not encounter day to day, +// but people developing Mill plugins or working on particularly large or +// sophisticated Mill builds will need to understand. +* xref:large/large.adoc[] +** xref:large/selective-execution.adoc[] +** xref:large/multi-file-builds.adoc[] +* Mill In Depth +** xref:depth/sandboxing.adoc[] +** xref:depth/execution-model.adoc[] +** xref:depth/process-architecture.adoc[] +** xref:depth/design-principles.adoc[] +** xref:depth/why-scala.adoc[] // Reference pages that a typical user would not typically read top-to-bottom, // but may need to look up once in a while, and thus should be written down // *somewhere*. 
-.Reference -* xref:Mill_Design_Principles.adoc[] -* xref:Bundled_Libraries.adoc[] -* {mill-doc-url}/api/latest/mill/index.html[Mill Scaladoc] -* xref:Changelog.adoc[] +* Reference +** {mill-doc-url}/api/latest/mill/index.html[Mill Scaladoc] +** xref:reference/changelog.adoc[] diff --git a/docs/modules/ROOT/pages/Bundled_Libraries.adoc b/docs/modules/ROOT/pages/Bundled_Libraries.adoc deleted file mode 100644 index f8090b0951d..00000000000 --- a/docs/modules/ROOT/pages/Bundled_Libraries.adoc +++ /dev/null @@ -1,43 +0,0 @@ -= Bundled Libraries - -:page-aliases: External_References.adoc - -Mill comes bundled with a set of external Open Source libraries and projects. - -== OS-lib - -Project page:: https://github.com/com-lihaoyi/os-lib -ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/os-lib_2.13/latest/index.html - -include::example/external/libraries/1-oslib.adoc[] - -== uPickle - -Project page:: https://github.com/com-lihaoyi/upickle -ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/upickle_2.13/latest/index.html - -include::example/external/libraries/2-upickle.adoc[] - -== Requests-Scala - -Project page:: https://github.com/com-lihaoyi/requests-scala -ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/requests_2.13/latest/index.html - -include::example/external/libraries/3-requests.adoc[] - -== MainArgs - - -Project page:: https://github.com/com-lihaoyi/mainargs -Scaladoc:: https://javadoc.io/doc/com.lihaoyi/mainargs_2.13/latest/index.html - -include::example/external/libraries/4-mainargs.adoc[] - -== Coursier - -Coursier is the Scala application and artifact manager. Mill uses Coursier for -all third-party artifact resolution and management in JVM languages (Scala, -Java, etc.) 
- -Project page:: https://github.com/coursier/coursier -Documentation:: https://get-coursier.io/docs/overview \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Case_Study_Mill_vs_Gradle.adoc b/docs/modules/ROOT/pages/Case_Study_Mill_vs_Gradle.adoc deleted file mode 100644 index 835b8024afa..00000000000 --- a/docs/modules/ROOT/pages/Case_Study_Mill_vs_Gradle.adoc +++ /dev/null @@ -1,239 +0,0 @@ -= Case Study: Mill vs Gradle - -++++ - -++++ - -This page compares using Mill to Gradle, using the https://github.com/mockito/mockito[Mockito Testing Library] -codebase as the example. Mockito is a medium sized codebase, 100,000 lines of Java split over 22 -subprojects. By porting it to Mill, this case study should give you -an idea of how Mill compares to Gradle in more realistic, real-world projects. - -To do this, we have written a Mill `build.mill` file for the Mockito project. This can be used -with Mill to build and test the various submodules of the Mockito project without needing to -change any other files in the repository: - -- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/mockito/build.mill[Mockito build.mill file] - -== Completeness - -The Mill build for Mockito is not 100% complete, but it covers most of the major parts of Mockito: -compiling Java, running JUnit tests. For now, the Android, Kotlin, and OSGI tests are skipped. - -The goal of this exercise is not to be 100% feature complete enough to replace the Gradle build -today. It is instead meant to provide a realistic comparison of how using Mill in a realistic, -real-world project compares to using Gradle. - -== Performance - -The Mill build for Mockito is generally snappier than the Gradle build. This applies to -most workflows, but the difference matters most for workflows which are short-lived, -where the difference in the fixed overhead of the build tool is most noticeable. 
- -For comparison purposes, I disabled the Gradle subprojects that we did not fully implement in Mill -(`groovyTest`, `groovyInlineTest`, `kotlinTest`, `kotlinReleaseCoroutinesTest`, `android`, -`osgi-test`, `java21-test`). - -For the benchmarks below, each provided number is the median wall time of three consecutive runs -on my M1 Macbook Pro. While ad-hoc, these benchmarks are enough to give you a flavor of how -Mill's performance compares to Maven: - -[cols="1,1,1,1"] -|=== -| Benchmark | Gradle | Mill | Speedup - -| <> | 17.6s | 5.40s | 3.3x -| <> | 12.3s | 3.57s | 3.4x -| <> | 4.41s | 1.20s | 3.7x -| <> | 1.37s | 0.51s | 2.7x -| <> | 0.94s | 0.46s | 2.0x -|=== - -The column on the right shows the speedups of how much faster Mill is compared to the -equivalent Maven workflow. In most cases, Mill is 5-10x faster than Gradle. Below, we -will go into more detail of each benchmark: how they were run, what they mean, and how -we can explain the difference in performing the same task with the two different build tools. - -=== Sequential Clean Compile All - -```bash -$ ./gradlew clean; time ./gradlew classes testClasses --no-build-cache -17.6s -18.2s -17.4s - -$ ./mill clean; time ./mill -j 1 __.compile -5.60s -5.40s -6.13s -``` - -This benchmark measures the time taken to sequentially compiled all the Java code in -the Mockito code base. The goal of this benchmark is to try and measure the "clean compile -everything" step, without the effects of parallelism that can be nondeterministic and vary -wildly from machine to machine depending on the number of cores available. - -To limit our comparison to compiling Java code only, we avoid -using `build` in favor of `classes` and `testClasses`: this skips running tests, -lint, jar/docjar generation, and other steps that `build` performs to make it an apples-to-apples -comparison. 
Furthermore, Gradle parallelizes the build by default and caches things globally -under `~/.gradle/caches`, while Mill parallelizes by default but does not cache things globally. -Again to make it a fair comparison, we use `--no-build-cache` in Gradle and set -`org.gradle.parallel=false` in `gradle.properties`, and pass `-j 1` to limit Mill to a -single thread. - -Here we see Mill being about ~3.3x faster than Gradle, to do the equivalent amount of work. - -=== Parallel Clean Compile All - -```bash -$ ./gradlew clean; time ./gradlew classes testClasses --no-build-cache -13.8s -12.3s -11.4s - -$ ./mill clean; time ./mill -j 10 __.compile -3.59s -3.57s -3.45s -``` - -This benchmark is identical to the <> benchmark above, but enables -parallelism: Gradle by default, Mill via `-j 10` to run on 10 cores (the number on my Macbook Pro). - -Neither Gradle nor Mill benefit hugely from parallelism: both show a moderate ~50% speedup, -despite receiving 900% more CPUs. This likely indicates that the module dependency graph -of the Mockito codebase is laid out in a way that does not allow huge amounts of compile-time -parallelism. - -Again, we see Mill being about ~3.4x faster than Gradle, to do the equivalent amount of work. -This indicates the the speedup Mill provides over Gradle is unrelated to the parallelism of -each tool. - -=== Clean Compile Single-Module - -```bash -$ ./gradlew clean; time ./gradlew :classes --no-build-cache -4.14s -4.41s -4.41s - -$ ./mill clean; time ./mill compile -1.20s -1.12s -1.30s -``` - -This benchmark indicates the use case of clean-compiling a single module. In this case, -the root module in `src/main/java/` containing the bulk of the Mockito library code, -_exluding_ the test code in `src/test/java/` and all the downstream subprojects in -`subprojects/`. - -This benchmark gives us Mill being about ~3.7x faster than Gradle. This is in line with -the results above. 
- -=== Incremental Compile Single-Module - -```bash -$ echo "" >> src/main/java/org/mockito/BDDMockito.java; time ./gradlew :classes -1.37s -1.39s -1.28s - -$ echo "" >> src/main/java/org/mockito/BDDMockito.java; time ./mill compile -compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... -0.52s -0.51s -0.52s -``` - -This benchmark measures the common case of making a tiny change to a single file and -re-compiling just that module. This is the common workflow that most software developers -do over and over day-in and day-out. We simulate this by appending a new line to the -file `src/main/java/org/mockito/BDDMockito.java`. - -Both Mill and Gradle are able to take advantage of the small code change and re-compile -only the single files needing re-compilation, demonstrating substantial speedups over -the <> benchmark above. Mill remains faster than Gradle, -showing a ~2.7x speedup for this task - -=== No-Op Compile Single-Module - -```bash -$ time ./gradlew :classes -0.95s -0.93s -0.94s - -$ time ./mill common.compile -0.46s -0.50s -0.45s -``` - -This benchmark is meant to measure the pure overhead of running the build tool: given a single -module that did _not_ change, the build tool should need to do _nothing_ in response, and so -any time taken is pure overhead. - -For both Mill and Gradle, we see small speedups relative to the <> -benchmark above, which likely comes from not having to compile any Java source files at all. Mill -remains faster than Gradle by about 2.0x. - -== Debugging Tooling - -Another area that Mill does better than Gradle is providing builtin tools for you to understand -what your build is doing. For example, the Mockito project build discussed has 22 submodules -and associated test suites, but how do these different modules depend on each other? 
With -Mill, you can run `./mill visualize __.compile`, and it will show you how the -`compile` task of each module depends on the others: - -image::MockitoCompileGraph.svg[] - -Apart from the static dependency graph, another thing of interest may be the performance -profile and timeline: where the time is spent when you actually compile everything. With -Mill, when you run a compilation using `./mill -j 10 __.compile`, you automatically get a -`out/mill-chrome-profile.json` file that you can load into your `chrome://tracing` page and -visualize where your build is spending time and where the performance bottlenecks are: - -image::MockitoCompileProfile.png[] - -If you want to inspect the tree of third-party dependencies used by any module, the -built in `ivyDepsTree` command lets you do that easily: - -```bash -$ ./mill subprojects.junit-jupiter.ivyDepsTree -├─ org.junit.jupiter:junit-jupiter-api:5.10.3 -│ ├─ org.apiguardian:apiguardian-api:1.1.2 -│ ├─ org.junit.platform:junit-platform-commons:1.10.3 -│ │ └─ org.apiguardian:apiguardian-api:1.1.2 -│ └─ org.opentest4j:opentest4j:1.3.0 -└─ org.objenesis:objenesis:3.3 -``` - -None of these tools are rocket science, but Mill provides all of them out of the -box in a convenient package for you to use. Whether you want a visual graph layout, -a parallel performance profile, or a third-party dependency tree of your project, -Mill makes it easy and convenient without needing to fiddle with custom configuration -or third party plugins. This helps make it easy for you to explore, understand, and -take ownership of the build tool. - -== Conclusion - - -Both the Mill and Gradle builds we discussed in this case study do the same thing: they -compile Java code and run tests. Sometimes they perform additional configuration, tweaking -JVM arguments or doing ad-hoc classpath mangling. 
- -Mill doesn't try to do _more_ than Gradle does, but it -tries to do it _better_: faster compiles, shorter and easier to read configs, easier -extensibility via libraries. - -Again, the Mill build used in this comparison is for demonstration purposes, and more -work would be necessary to make the Mill build production ready: publishing configuration, -code coverage integration, and so on. However, hopefully it demonstrates the potential value: -significantly improved performance, so that you spend less time waiting for your code to -compile and more time doing the work that actually matters, with builtin debugging tools -to help turn normally opaque "build config" into something that's transparent and -easily understandable. diff --git a/docs/modules/ROOT/pages/Case_Study_Mill_vs_Maven.adoc b/docs/modules/ROOT/pages/Case_Study_Mill_vs_Maven.adoc deleted file mode 100644 index a0202f504f3..00000000000 --- a/docs/modules/ROOT/pages/Case_Study_Mill_vs_Maven.adoc +++ /dev/null @@ -1,651 +0,0 @@ -= Case Study: Mill vs Maven - -++++ - -++++ - -This page compares using Mill to Maven, using the https://github.com/netty/netty[Netty Network Server] -codebase as the example. Netty is a large, old codebase. 500,000 lines of Java, written by -over 100 contributors across 15 years, split over 47 subprojects, with over 10,000 lines of -Maven `pom.xml` configuration alone. By porting it to Mill, this case study should give you -an idea of how Mill compares to Maven in larger, real-world projects. - -To do this, we have written a Mill `build.mill` file for the Netty project. 
This can be used -with Mill to build and test the various submodules of the Netty project without needing to -change any other files in the repository: - -- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/netty/build.mill[Netty build.mill file] - -== Completeness - -The Mill build for Netty is not 100% complete, but it covers most of the major parts of Netty: -compiling Java, compiling and linking C code via JNI, running JUnit tests and some integration -tests using H2Spec. All 47 Maven subprojects are modelled using Mill, with the entire Netty codebase -being approximately 500,000 lines of code. - -```bash -$ git ls-files | grep \\.java | xargs wc -l -... -513805 total -``` - -The goal of this exercise is not to be 100% feature complete enough to replace the Maven build -today. It is instead meant to provide a realistic comparison of how using Mill in a large, -complex project compares to using Maven. - -Both Mill and Maven builds end up compiling the same set of files, although the number being -reported by the command line is slightly higher for Mill (2915 files) than Maven (2822) due -to differences in the reporting (e.g. Maven does not report `package-info.java` files as part -of the compiled file count). - -== Performance - -The Mill build for Netty is much more performant than the default Maven build. This applies to -most workflows. - -For the benchmarks below, each provided number is the wall time of three consecutive runs -on my M1 Macbook Pro. While ad-hoc, these benchmarks are enough to give you a flavor of how -Mill's performance compares to Maven: - -[cols="1,1,1,1"] -|=== -| Benchmark | Maven | Mill | Speedup - -| <> | 2m 31.12s | 0m 22.19s | 6.8x - -| <> | 1m 16.45s | 0m 09.95s | 7.7x -| <> | 0m 19.62s | 0m 02.17s | 9.0x -| <> | 0m 21.10s | 0m 00.54s | 39.1x -| <> | 0m 17.34s | 0m 00.47s | 39.1x -|=== - -The column on the right shows the speedups of how much faster Mill is compared to the -equivalent Maven workflow. 
In most cases, Mill is 5-10x faster than Maven. Below, we -will go into more detail of each benchmark: how they were run, what they mean, and how -we can explain the difference in performing the same task with the two different build tools. - -=== Sequential Clean Compile All - -```bash -$ time ./mvnw -DskipTests -Dcheckstyle.skip -Denforcer.skip=true clean install -2m 42.96s -2m 27.58s -2m 31.12s - -$ ./mill clean; time ./mill __.compile -0m 29.14s -0m 22.19s -0m 20.79s -``` - -This benchmark exercises the simple "build everything from scratch" workflow, with all remote -artifacts already in the local cache. The actual files -being compiled are the same in either case (as mentioned in the <> section). -I have explicitly disabled the various linters and tests for the Maven build, to just focus -on the compilation of Java source code making it an apples-to-apples comparison. - -As a point of reference, Java typically compiles at 10,000-50,000 lines per second on a -single thread, and the Netty codebase is ~500,000 lines of code, so we would expect compile -to take 10-50 seconds without parallelism. -The 20-30s taken by Mill seems about what you would expect for a codebase of this size, -and the ~150s taken by Maven is far beyond what you would expect from simple Java compilation. - -==== Where is Maven spending its time? 
-From eyeballing the logs, the added overhead comes from things like: - -_Downloading Metadata from Maven Central_ - -```text -Downloading from sonatype-nexus-snapshots: https://oss.sonatype.org/content/repositories/snapshots/io/netty/netty-transport-native-unix-common/maven-metadata.xml -Downloading from central: https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/maven-metadata.xml -Downloaded from central: https://repo.maven.apache.org/maven2/io/netty/netty-transport-native-unix-common/maven-metadata.xml (4.3 kB at 391 kB/s) -Downloaded from sonatype-nexus-snapshots: https://oss.sonatype.org/content/repositories/snapshots/io/netty/netty-transport-native-unix-common/maven-metadata.xml (2.7 kB at 7.4 kB/s) -``` - -_Comparing Jars_ - -```text -Comparing [io.netty:netty-transport-sctp:jar:4.1.112.Final] against [io.netty:netty-transport-sctp:jar:4.1.113.Final-SNAPSHOT] (including their transitive dependencies). -``` - -In general, Maven spends much of time working with Jar files: packing them, unpacking them, -comparing them, etc. None of this is strictly necessary for compiling Java source files to -classfiles! But if they are not necessary, then why is Maven doing it? It turns out the -reason comes own to the difference of `mvn compile` vs `mvn install` - -==== Maven Compile vs Install - -In general, the reason we have to use `./mvwn install` rather than `./mvnw compile` is that -Maven's main mechanism for managing inter-module dependencies is via the local artifact cache -at `~/.m2/repository`. Although many workflows work with `compile`, some don't, and -`./mvnw clean compile` on the Netty repository fails with: - -```text -[ERROR] Failed to execute goal org.apache.maven.plugins:maven-dependency-plugin:2.10:unpack-dependencies -(unpack) on project netty-resolver-dns-native-macos: Artifact has not been packaged yet. -When used on reactor artifact, unpack should be executed after packaging: see MDEP-98. 
-> [Help 1] -[ERROR] -[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. -[ERROR] Re-run Maven using the -X switch to enable full debug logging. -[ERROR] -[ERROR] For more information about the errors and possible solutions, please read the following articles: -[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException -[ERROR] -[ERROR] After correcting the problems, you can resume the build with the command -[ERROR] mvn -rf :netty-resolver-dns-native-macos -``` - -In contrast, Mill builds do not rely on the local artifact cache, even though Mill is able -to publish to it. That means Mill builds are able to work directly with classfiles on disk, -simply referencing them and using them as-is without spending time packing and unpacking them -into `.jar` files. Furthermore, even if we _did_ want Mill to generate the ``.jar``s, the -overhead of doing so is just a few seconds, far less than the two entire minutes that -Maven's overhead adds to the clean build: - -```bash -$ time ./mvnw -DskipTests -Dcheckstyle.skip -Denforcer.skip=true clean install -2m 42.96s -2m 27.58s -2m 31.12s - -$ ./mill clean; time ./mill __.compile -0m 29.14s -0m 22.19s -0m 20.79s - -$ ./mill clean; time ./mill __.jar -0m 32.58s -0m 24.90s -0m 23.35s -``` - -From this benchmark, we can see that although both Mill and Maven are doing the same work, -Mill takes about as long as it _should_ for this task of compiling 500,000 lines of Java source -code, while Maven takes considerably longer. And much of this overhead comes from Maven -doing unnecessary work packing/unpacking jar files and publishing to a local repository, -whereas Mill directly uses the classfiles generated on disk to bypass all that work. 
- -=== Parallel Clean Compile All - -```bash -$ time ./mvnw -T 4 -DskipTests -Dcheckstyle.skip -Denforcer.skip=true clean install -1m 19.58s -1m 16.34s -1m 16.45s - -$ ./mill clean; time ./mill -j 4 __.compile -0m 14.80s -0m 09.95s -0m 08.83s -``` - -This example compares Maven v.s. Mill, when performing the clean build on 4 threads. -Both build tools support parallelism (`-T 4` in Maven and `-j 4` in Mill), and both -tools see a similar ~2x speedup for building the Netty project using 4 threads. Again, -this tests a clean build using `./mvnw clean` or `./mill clean`. - -This comparison shows that much of Mill's speedup over Maven is unrelated to parallelism. -Whether sequential or parallel, Mill has approximately the same ~7x speedup over Maven -when performing a clean build of the Netty repository. - -=== Clean Compile Single-Module - -```bash -$ time ./mvnw -pl common -DskipTests -Dcheckstyle.skip -Denforcer.skip=true clean install -0m 19.62s -0m 20.52s -0:19:50 - -$ ./mill clean common; time ./mill common.test.compile -0m 04.94s -0m 02.17s -0m 01.95s -``` - -This exercise limits the comparison to compiling a single module, in this case `common/`. -`./mvnw -pl common install` compiles both the `main/` and `test/` sources, whereas -`./mill common.compile` would only compile the `main/` sources, and we need to explicitly -reference `common.test.compile` to compile both (because `common.test.compile` depends on -`common.compile`, `common.compile` gets run automatically) - -Again, we can see a significant speedup of Mill v.s. Maven remains even when compiling a -single module: a clean compile of `common/` is about 9x faster with Mill than with Maven! -Again, `common/` is about 40,000 lines of Java source code, so at 10,000-50,000 lines per -second we would expect it to compile in about 1-4s. That puts Mill's compile times right -at what you would expect, whereas Maven's has a significant overhead. 
- - -=== Incremental Compile Single-Module - -```bash -$ echo "" >> common/src/main/java/io/netty/util/AbstractConstant.java -$ time ./mvnw -pl common -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install -Compiling 174 source files to /Users/lihaoyi/Github/netty/common/target/classes -Compiling 60 source files to /Users/lihaoyi/Github/netty/common/target/test-classes - -0m 21.10s -0m 19.64s -0:21:29 - - -$ echo "" >> common/src/main/java/io/netty/util/AbstractConstant.java -$ time ./mill common.test.compile -compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... - -0m 00.78s -0m 00.54s -0m 00.51s -``` - -This benchmark explores editing a single file and re-compiling `common/`. - -Maven by default takes about as long to re-compile ``common/``s `main/` and `test/` sources -after a single-line edit as it does from scratch, about 20 seconds. However, Mill -takes just about 0.5s to compile and be done! Looking at the logs, we can see it is -because Mill only compiles the single file we changed, and not the others. - -For this incremental compilation, Mill uses the -https://github.com/sbt/zinc[Zinc Incremental Compiler]. Zinc is able to analyze the dependencies -between files to figure out what needs to re-compile: for an internal change that doesn't -affect downstream compilation (e.g. 
changing a string literal) Zinc only needs to compile -the file that changed, taking barely half a second: - -```diff -$ git diff -diff --git a/common/src/main/java/io/netty/util/AbstractConstant.java b/common/src/main/java/io/netty/util/AbstractConstant.java -index de16653cee..9818f6b3ce 100644 ---- a/common/src/main/java/io/netty/util/AbstractConstant.java -+++ b/common/src/main/java/io/netty/util/AbstractConstant.java -@@ -83,7 +83,7 @@ public abstract class AbstractConstant> implements - return 1; - } - -- throw new Error("failed to compare two different constants"); -+ throw new Error("failed to compare two different CONSTANTS!!"); - } - - } -``` -```bash -$ time ./mill common.test.compile -[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... -0m 00.55s6 -``` - -In contrast, a change to a class or function public signature (e.g. adding a method) may -require downstream code to re-compile, and we can see that below: - -```diff -$ git diff -diff --git a/common/src/main/java/io/netty/util/AbstractConstant.java b/common/src/main/java/io/netty/util/AbstractConstant.java -index de16653cee..f5f5a93e0d 100644 ---- a/common/src/main/java/io/netty/util/AbstractConstant.java -+++ b/common/src/main/java/io/netty/util/AbstractConstant.java -@@ -41,6 +41,10 @@ public abstract class AbstractConstant> implements - return name; - } - -+ public final String name2() { -+ return name; -+ } -+ - @Override - public final int id() { - return id; -``` -```bash -$ time ./mill common.test.compile -[25/48] common.compile -[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... -[info] compiling 2 Java sources to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... -[info] compiling 4 Java sources to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... 
-[info] compiling 3 Java sources to /Users/lihaoyi/Github/netty/out/common/test/compile.super/mill/scalalib/JavaModule/compile.dest/classes ... -[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/test/compile.super/mill/scalalib/JavaModule/compile.dest/classes ... -0m 00.81s2 -``` - -Here, we can see that Zinc ended up re-compiling 7 files in `common/src/main/` and 3 files -in `common/src/test/` as a result of adding a method to `AbstractConstant.java`. - -In general, Zinc is conservative, and does not always end up selecting the minimal set of -files that need re-compiling: e.g. in the above example, the new method `name2` does not -interfere with any existing method, and the ~9 downstream files did not actually need to -be re-compiled! However, even conservatively re-compiling 9 files is much faster than -Maven blindly re-compiling all 234 files, and as a result the iteration loop of -editing-compiling-testing your Java projects in Mill can be much faster than doing -the same thing in Maven - -=== No-Op Compile Single-Module - -```bash -$ time ./mvnw -pl common -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install -0m 16.34s -0m 17.34s -0m 18.28s - -$ time ./mill common.test.compile -0m 00.49s -0m 00.47s -0m 00.45s -``` - -This last benchmark explores the boundaries of Maven and Mill: what happens if -we ask to compile a single module _that has already been compiled_? In this case, -there is literally _nothing to do_. For Maven, "doing nothing" takes ~17 seconds, -whereas for Mill we can see it complete and return in less than 0.5 seconds - -Grepping the logs, we can confirm that both build tools skip re-compilation of the -`common/` source code. In Maven, skipping compilation only saves us ~2 seconds, -bringing down the 19s we saw in <> to 17s here. This -matches what we expect about Java compilation speed, with the 2s savings on -40,000 lines of code telling us Java compiles at ~20,000 lines per second. 
However, -we still see Maven taking *17 entire seconds* before it can decide to do nothing! - -In contrast, doing the same no-op compile using Mill, we see the timing from 2.2s -in <> to 0.5 seconds here. This is the same ~2s reduction -we saw with Maven, but due to Mill's minimal overhead, in the end the command -finishes in less than half a second. - - -== Extensibility - -Even though Maven is designed to be declarative, in many real-world codebases you end -up needing to run ad-hoc scripts and logic. This section will explore two such scenarios, -so you can see how Mill differs from Maven in the handling of these requirements. - -=== Groovy - -The Maven build for the `common/` subproject -uses a Groovy script for code generation. This is configured via: - -```xml - - ${project.basedir}/src/main/templates - ${project.basedir}/src/test/templates - ${project.build.directory}/generated-sources/collections/java - ${project.build.directory}/generated-test-sources/collections/java - - - org.codehaus.gmaven - groovy-maven-plugin - 2.1.1 - - - org.codehaus.groovy - groovy - 3.0.9 - - - ant - ant-optional - 1.5.3-1 - - - - - generate-collections - generate-sources - - execute - - - ${project.basedir}/src/main/script/codegen.groovy - - - - -``` - -In contrast, the Mill build configures the code generation as follows: - -```scala -import $ivy.`org.codehaus.groovy:groovy:3.0.9` -import $ivy.`org.codehaus.groovy:groovy-ant:3.0.9` -import $ivy.`ant:ant-optional:1.5.3-1` - -object common extends NettyModule{ - ... 
- def script = T.source(millSourcePath / "src" / "main" / "script") - def generatedSources0 = T{ - val shell = new groovy.lang.GroovyShell() - val context = new java.util.HashMap[String, Object] - - context.put("collection.template.dir", "common/src/main/templates") - context.put("collection.template.test.dir", "common/src/test/templates") - context.put("collection.src.dir", (T.dest / "src").toString) - context.put("collection.testsrc.dir", (T.dest / "testsrc").toString) - - shell.setProperty("properties", context) - shell.setProperty("ant", new groovy.ant.AntBuilder()) - - shell.evaluate((script().path / "codegen.groovy").toIO) - - (PathRef(T.dest / "src"), PathRef(T.dest / "testsrc")) - } - - def generatedSources = T{ Seq(generatedSources0()._1)} -} -``` - -While the number of lines of code _written_ is not that different, the Mill configuration -is a lot more direct: rather than writing 35 lines of XML to configure an opaque third-party -plugin, we instead write 25 lines of code to directly do what we want: import `groovy`, -configure a `GroovyShell`, and use it to evaluate our `codegen.groovy` script. - -This direct control means you are not beholden to third party plugins: rather than being -limited to what an existing plugin _allows_ you to do, Mill allows you to directly write -the code necessary to do what _you need to do_. - -=== Calling Make - -The Maven build for the `transport-native-unix-common/` subproject needs to call -`make` in order to compile its C code to modules that can be loaded into Java applications -via JNI. 
Maven does this via the `maven-dependency-plugin` and `maven-antrun-plugin` which are -approximately configured as below: - -```xml - - make - gcc - ar - libnetty-unix-common - ${project.basedir}/src/main/c - ${project.build.directory}/netty-jni-util/ - ${project.build.directory}/native-jar-work - ${project.build.directory}/native-objs-only - ${project.build.directory}/native-lib-only - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - - unpack - generate-sources - - unpack-dependencies - - - io.netty - netty-jni-util - sources - ${jniUtilIncludeDir} - **.h,**.c - false - true - - - - - - maven-antrun-plugin - - - - build-native-lib - generate-sources - - run - - - - - - - - - - - - - - - - - - - - - -``` - -The `maven-dependency-plugin` is used to download and unpack a single `jar` file, -while `maven-antrun-plugin` is used to call `make`. Both are configured via XML, -with the `make` command essentially being a bash script wrapped in layers of XML. - -In contrast, the Mill configuration for this logic is as follows: - -```scala -def makefile = T.source(millSourcePath / "Makefile") -def cSources = T.source(millSourcePath / "src" / "main" / "c") -def cHeaders = T{ - for(p <- os.walk(cSources().path) if p.ext == "h"){ - os.copy(p, T.dest / p.relativeTo(cSources().path), createFolders = true) - } - PathRef(T.dest) -} - -def make = T{ - os.copy(makefile().path, T.dest / "Makefile") - os.copy(cSources().path, T.dest / "src" / "main" / "c", createFolders = true) - - val Seq(sourceJar) = resolveDeps( - deps = T.task(Agg(ivy"io.netty:netty-jni-util:0.0.9.Final").map(bindDependency())), - sources = true - )().toSeq - - os.proc("jar", "xf", sourceJar.path).call(cwd = T.dest / "src" / "main" / "c") - - os.proc("make").call( - cwd = T.dest, - env = Map( - "CC" -> "clang", - "AR" -> "ar", - "JNI_PLATFORM" -> "darwin", - "LIB_DIR" -> "lib-out", - "OBJ_DIR" -> "obj-out", - "MACOSX_DEPLOYMENT_TARGET" -> "10.9", - "CFLAGS" -> Seq( - "-mmacosx-version-min=10.9", 
"-O3", "-Werror", "-Wno-attributes", "-fPIC", - "-fno-omit-frame-pointer", "-Wunused-variable", "-fvisibility=hidden", - "-I" + sys.props("java.home") + "/include/", - "-I" + sys.props("java.home") + "/include/darwin", - "-I" + sys.props("java.home") + "/include/linux", - ).mkString(" "), - "LD_FLAGS" -> "-Wl,--no-as-needed -lrt -Wl,-platform_version,macos,10.9,10.9", - "LIB_NAME" -> "libnetty-unix-common" - ) - ) - - (PathRef(T.dest / "lib-out"), PathRef(T.dest / "obj-out")) -} -``` - -[graphviz] -.... -digraph G { - rankdir=LR - node [shape=box width=0 height=0 style=filled fillcolor=white] - makefile -> make - cSources -> make - cSources -> cHeaders -} -.... - -In Mill, we define the `makefile`, `cSources`, `cHeaders`, and `make` tasks. The bulk -of the logic is in `def make`, which prepares the `makefile` and C sources, -resolves the `netty-jni-util` source jar and unpacks it with `jar xf`, and calls `make` -with the given environment variables. Both `cHeaders` and the output of `make` are used -in downstream modules. - -Again, the Maven XML and Mill code contains exactly the same logic, and neither is -much more concise or verbose than the other. Rather, what is interesting is that -it is much easier to work with this kind of _build logic_ via _concise type-checked code_, -rather than configuring a bunch of third-party plugins to try and achieve what you want. - - -== Debugging Tooling - -Another area that Mill does better than Maven is providing builtin tools for you to understand -what your build is doing. For example, the Netty project build discussed has 47 submodules -and associated test suites, but how do these different modules depend on each other? 
With -Mill, you can run `./mill visualize __.compile`, and it will show you how the -`compile` task of each module depends on the others: - -image::NettyCompileGraph.svg[] - -Apart from the static dependency graph, another thing of interest may be the performance -profile and timeline: where the time is spent when you actually compile everything. With -Mill, when you run a compilation using `./mill -j 10 __.compile`, you automatically get a -`out/mill-chrome-profile.json` file that you can load into your `chrome://tracing` page and -visualize where your build is spending time and where the performance bottlenecks are: - -image::NettyCompileProfile.png[] - -If you want to inspect the tree of third-party dependencies used by any module, the -built in `ivyDepsTree` command lets you do that easily: - -```bash -$ ./mill handler.ivyDepsTree -├─ org.jctools:jctools-core:4.0.5 -├─ org.junit.jupiter:junit-jupiter-api:5.9.0 -│ ├─ org.apiguardian:apiguardian-api:1.1.2 -│ ├─ org.junit.platform:junit-platform-commons:1.9.0 -│ │ └─ org.apiguardian:apiguardian-api:1.1.2 -│ └─ org.opentest4j:opentest4j:1.2.0 -└─ com.google.protobuf:protobuf-java:2.6.1 -``` - -None of these tools are rocket science, but Mill provides all of them out of the -box in a convenient package for you to use. Whether you want a visual graph layout, -a parallel performance profile, or a third-party dependency tree of your project, -Mill makes it easy and convenient without needing to fiddle with custom configuration -or third party plugins. This helps make it easy for you to explore, understand, and -take ownership of the build tool. - -== Conclusion - -Both the Mill and Maven builds we discussed in this case study do the same thing: they -compile Java code, zip them into Jar files, run tests. Sometimes they compile and link -C code or run `make` or Groovy. 
- -Mill doesn't try to do _more_ than Maven does, but it -tries to do it _better_: faster compiles, shorter and easier to read configs, easier -extensibility via libraries (e.g. `org.codehaus.groovy:groovy`) and subprocesses -(e.g. `make`). - -Again, the Mill build used in this comparison is for demonstration purposes, and more -work would be necessary to make the Mill build production ready: compatibility with -different operating system architectures, Java versions, and so on. However, hopefully -it demonstrates the potential value: improved performance, conciseness of the build logic, -and easy extensibility so you can fine-tune your build logic to your requirements. -Mill provides builtin tools to help you navigate, -visualize, and understand your build, turning a normally opaque "build config" into -something that's transparent and easily understandable. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Case_Study_Mill_vs_SBT.adoc b/docs/modules/ROOT/pages/Case_Study_Mill_vs_SBT.adoc deleted file mode 100644 index 91acb7f7f65..00000000000 --- a/docs/modules/ROOT/pages/Case_Study_Mill_vs_SBT.adoc +++ /dev/null @@ -1,295 +0,0 @@ -= Case Study: Mill vs SBT - -++++ - -++++ - -This page compares using Mill to SBT, using the https://github.com/gatling/gatling[Gatling Load Testing Framework] -codebase as the example. Gatling is a medium sized codebase, 40,000 lines of Scala split over 21 -subprojects. By porting it to Mill, this case study should give you an idea of how Mill compares -to SBT in more realistic, real-world projects. - -In general, in the ideal case Mill and SBT have similar performance: caching, parallelism, incremental -compilation, and so on. Mill's main advantage over SBT is its simplicity: - -* You do not need to keep a live SBT session to maximize performance, exit SBT to run Bash commands, - or juggle multiple terminal windows to run SBT in one and Bash in another. 
Instead, you can just - run Mill like any command line tool, and Mill caches and parallelizes to maximize performance automatically - -* Mill's IDE support is better than SBTs due to how Mill is designed: peek-at-documentation, - jump-to-definition, find-overrides, etc. is much more useful since your IDE understands Mill - much better than it understands SBT. - -* Mill comes with a lot of useful debugging tools builtin, without needing to juggle external plugins: - visualizing subproject dependencies, performance profiles, third-party dependency trees. This helps - you understand what your Mill build is doing. - -To do this comparison, we have written a Mill `build.mill` file for the Gatling project. This can be used -with Mill to build and test the various submodules of the Gatling project without needing to -change any other files in the repository: - -- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/gatling/build.mill[Gatling build.mill file] - -== Completeness - -The Mill build for Gatling is not 100% complete, but it covers most of the major parts of Gatling: -compiling Scala, running tests. It does not currently cover linting via -https://github.com/diffplug/spotless[Spotless], as that is not built-in to Mill, but it could be -added as necessary. - -The goal of this exercise is not to be 100% feature complete enough to replace the SBT build -today. It is instead meant to provide a realistic comparison of how using Mill in a realistic, -real-world project compares to using SBT. - -== Performance - -[cols="1,1,1,1"] -|=== -| Benchmark | Cold SBT | Hot SBT | Mill - -| <> | 28.7s | 12s | 10.4s -| <> | 10.1s | 1s | 0.96s -| <> | 6.2s | 0s | 0.48s -| <> | 4.2s | 0s | 0.40s -|=== - -SBT can be used in two modes, either "cold" run directly from the command line, or "hot" -where an SBT session is kept open and commands are run within in. I provide the timings for -both scenarios above, along with the time taken for Mill commands. 
Mill does not have this -distinction, and can only be run directly from the command line. The `Hot SBT` mode only -reports timings to the nearest second, so that is the number used in this comparison. - -The Mill build benchmarks for Gatling is generally much snappier than the `Cold SBT` benchmark, -and comparable to that `Hot SBT` benchmark. Mill does not have the same _Cold vs Hot_ -distinction that SBT has, as Mill is always run "cold" from the command line and keeps the -process around to provide "hot" performance automatically. - -For the benchmarks above, each provided number is the median wall time of three consecutive runs -on my M1 Macbook Pro. While ad-hoc, these benchmarks are enough to give you a flavor of how -Mill's performance compares to SBT. For a fair comparison, we disabled `gatling-build-plugin` -in the sbt setup, which bundles the various scalafmt/scalafix/etc. linters as part of `compile`, -since Mill doesn't bundle them and instead expects them to be invoked separately. - -=== Parallel Clean Compile All - -```bash -$ sbt clean; time sbt compile -28.7s -27.6s -31.2s - -$ sbt - -sbt> clean; compile -12s -12s -10s - -$ ./mill clean; time ./mill -j 10 __.compile -10.7s -9.4s -10.4s -``` - -This benchmark measures the time taken to parallel compile all the Java and Scala code in -the Gatling code base. We configure Mill to do the same using the same number of threads -(10 on my laptop) as SBT uses. As SBT runs in parallel by default, we do not have a -comparison for sequential execution times. - -=== Clean Compile Single-Module - -```bash -> sbt clean; time sbt gatling-commons/compile -10.1 -10.7 -10.1 - -sbt> clean; gatling-common/compile -1s -1s -1s - -$ ./mill clean; time ./mill gatling-common.compile -0.96s -0.95s -0.96s -``` - -This benchmark indicates the use case of clean-compiling a single module. In this case, -the `gatling-commons` module's application code in `commons/, _exluding_ the test code in -and all the downstream submodules. 
- -=== Incremental Compile Single-Module - -```bash -$ echo "" >> gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala -$ time sbt gatling-commons/compile -6.6s -6.2s -6.0s - -sbt> gatling-commons/compile -0s -0s -0s - -$ echo "" >> gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala -$ time ./mill gatling-commons.compile -0.49s -0.48s -0.47s -``` - -This benchmark measures the common case of making a tiny change to a single file and -re-compiling just that module. This is the common workflow that most software developers -do over and over day-in and day-out. We simulate this by appending a new line to the -file `gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala`. - -Both Mill and SBT are able to take advantage of the small code change and re-compile -only the single files needing re-compilation, demonstrating substantial speedups over -the <> benchmark above. Both "Hot SBT" and "Mill" finish in -a fraction of a second, while "Cold SBT" has substantial overhead. - -=== No-Op Compile Single-Module - -```bash -$ time sbt gatling-commons/compile -4.2s -4.2s -4.2s - -sbt> gatling-commons/compile -0s -0s -0s - -$ time ./mill gatling-commons.compile -0.39s -0.41s -0.40s -``` - -This benchmark is meant to measure the pure overhead of running the build tool: given a single -module that did _not_ change, the build tool should need to do _nothing_ in response, and so -any time taken is pure overhead. - -Again, we see both "Hot SBT" and "Mill" finish in a fraction of a second, with the Mill numbers -showing a ~0.4s overhead to run Mill even when there is no work to do, and the "Cold SBT" has -in comparison substantial >4s overhead. - -== IDE Support - -One area that Mill does significantly better than SBT is in the IDE support. For example, although -IDEs like IntelliJ are nominally able to parse and analyze your SBT files, the assistance they can -provide is often not very useful. 
For example, consider the inspection and jump-to-definition experience -of looking into an SBT Task: - -image::IntellijGatlingSbtTask1.png[] -image::IntellijGatlingSbtTask2.png[] - -Or an SBT plugin: - -image::IntellijGatlingSbtPlugin1.png[] -image::IntellijGatlingSbtPlugin2.png[] - -In general, although your IDE can make sure the name of the task exists, and the type is correct, it -is unable to pull up any further information about the task: its documentation, its implementation, -usages, any upstream overridden implementations, etc.. Some of this is the limitations of the IDE, -but some of it is fundamental: because SBT makes the developer define the `val myTask` separate -from the assignment of `myTask := something`, jumping to the definition of `myTask` tells you nothing -at all: what it does, where it is assigned, etc. - -In comparison, for Mill, IDEs like Intellij are able to provide much more intelligence. e.g. when -inspecting a task, it is able to pull up the documentation comment: - -image::IntellijGatlingMillTask1.png[] - -It is able to pull up any overridden implementations of task, directly in the editor: - -image::IntellijGatlingMillTask2.png[] - -And you can easily navigate to the overriden implementations to see where they are defined and -what you are overriding: - -image::IntellijGatlingMillTask3.png[] - -Mill's equivalent of SBT plugins are just Scala traits, and again you can easily pull up their -documentation in-line in the editor or jump to their full implementation: - -image::IntellijGatlingMillPlugin1.png[] -image::IntellijGatlingMillPlugin2.png[] - -In general, navigating around your build in Mill is much more straightforward than -navigating around your build in SBT. All your normal IDE functionality works perfectly: -jump-to-definition, find-usages, peek-at-documentation, and so on. 
Although the Mill -and SBT builds end up doing the same basic things - compiling Scala, running tests, -zipping up jars - Mill helps de-mystify things considerably so you are never blocked -wondering what your build tool is doing. - -== Debugging Tooling - -Another area that Mill does better than SBT is providing builtin tools for you to understand -what your build is doing. For example, the Gatling project build discussed has 21 submodules -and associated test suites, but how do these different modules depend on each other? With -Mill, you can run `./mill visualize __.compile`, and it will show you how the -`compile` task of each module depends on the others: - -image::GatlingCompileGraph.svg[] - -Apart from the static dependency graph, another thing of interest may be the performance -profile and timeline: where the time is spent when you actually compile everything. With -Mill, when you run a compilation using `./mill -j 10 __.compile`, you automatically get a -`out/mill-chrome-profile.json` file that you can load into your `chrome://tracing` page and -visualize where your build is spending time and where the performance bottlenecks are: - -image::GatlingCompileProfile.png[] - -If you want to inspect the tree of third-party dependencies used by any module, the -built in `ivyDepsTree` command lets you do that easily: - -```bash -$ ./mill gatling-app.ivyDepsTree -[137/137] gatling-app.ivyDepsTree -├─ org.scala-lang:scala-library:2.13.14 -├─ io.gatling:gatling-shared-model_2.13:0.0.6 -│ ├─ io.gatling:gatling-shared-util_2.13:0.0.8 -│ │ ├─ org.scala-lang:scala-library:2.13.14 -│ │ └─ org.scala-lang.modules:scala-collection-compat_2.13:2.11.0 -│ │ └─ org.scala-lang:scala-library:2.13.14 -│ ├─ io.suzaku:boopickle_2.13:1.3.3 -│ │ └─ org.scala-lang:scala-library:2.13.14 -│ └─ org.scala-lang:scala-library:2.13.14 -├─ io.gatling:gatling-shared-cli:0.0.3 -│ └─ com.github.spotbugs:spotbugs-annotations:4.8.4 -> 4.8.6 -│ └─ com.google.code.findbugs:jsr305:3.0.2 -├─ 
org.simpleflatmapper:lightning-csv:8.2.3 -│ └─ org.simpleflatmapper:sfm-util:8.2.3 -├─ com.github.ben-manes.caffeine:caffeine:3.1.8 -│ ├─ com.google.errorprone:error_prone_annotations:2.21.1 -│ └─ org.checkerframework:checker-qual:3.37.0 -... -``` - -None of these tools are rocket science, but Mill provides all of them out of the -box in a convenient package for you to use. Whether you want a visual graph layout, -a parallel performance profile, or a third-party dependency tree of your project, -Mill makes it easy and convenient without needing to fiddle with custom configuration -or third party plugins. This helps make it easy for you to explore, understand, and -take ownership of the build tool. - -== Conclusion - -Both the Mill and SBT builds we discussed in this case study do the same thing: they -compile Java and Scala code and run tests. If set up and used properly, SBT builds -are performant and do what needs to be done. - -Where Mill has an advantage over SBT is in its simplicity and understandability. You -do not need to worry about using it "the wrong way" and ending up with workflows running -slower than necessary. You can explore your build using your IDE like you would any other -project, tracing task dependencies using the same jump-to-definition you use to trace -method calls in your application code. Mill provides builtin tools to help you navigate, -visualize, and understand your build, turning a normally opaque "build config" into -something that's transparent and easily understandable. 
- diff --git a/docs/modules/ROOT/pages/Changelog.adoc b/docs/modules/ROOT/pages/Changelog.adoc deleted file mode 100644 index ce1d4c73bab..00000000000 --- a/docs/modules/ROOT/pages/Changelog.adoc +++ /dev/null @@ -1,4 +0,0 @@ -// = Changelog -include::partial$project-readme.adoc[tag=changelog,leveloffset=-1] - -include::partial$project-readme.adoc[tag=changelogOld,leveloffset=-1] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Contrib_Plugins.adoc b/docs/modules/ROOT/pages/Contrib_Plugins.adoc deleted file mode 100644 index ba104df457a..00000000000 --- a/docs/modules/ROOT/pages/Contrib_Plugins.adoc +++ /dev/null @@ -1,58 +0,0 @@ -= Contrib Plugins -:page-aliases: Contrib_Modules.adoc - - -The ((plugins)) in this section are hosted in the Mill git tree and developed / maintained by the community. - -For details about including plugins in your `build.mill` read xref:Using_Plugins.adoc[Using Mill Plugins]. - -[CAUTION] --- -When using one of these contribution modules, it is important that the versions you load match your mill version. -To facilitate this, Mill will automatically replace the `$MILL_VERSION` literal in your ivy imports with the correct value. -You can also leave the version completely empty to default to the mill version (but don't forget to keep the trailing colon). 
- -For instance: - -[source,scala] ----- -import $ivy.`com.lihaoyi::mill-contrib-bloop:$MILL_VERSION` ----- - -or - -[source,scala] ----- -import $ivy.`com.lihaoyi::mill-contrib-bloop:` ----- - --- - -== List of Contrib Plugins - -// See also the list in nav.adoc -* xref:contrib/artifactory.adoc[] -* xref:contrib/bintray.adoc[] -* xref:contrib/bloop.adoc[] -* xref:contrib/buildinfo.adoc[] -* xref:Plugin_BSP.adoc[] -* xref:contrib/codeartifact.adoc[] -* xref:contrib/docker.adoc[] -* xref:contrib/errorprone.adoc[] -* xref:contrib/checkstyle.adoc[] -* xref:contrib/flyway.adoc[] -* xref:contrib/gitlab.adoc[] -* xref:contrib/jmh.adoc[] -* xref:contrib/playlib.adoc[] -* xref:contrib/proguard.adoc[] -* xref:contrib/scalapblib.adoc[] -* xref:contrib/scoverage.adoc[] -* xref:contrib/sonatypecentral.adoc[] -* xref:contrib/testng.adoc[] -* xref:contrib/twirllib.adoc[] -* xref:contrib/versionfile.adoc[] - - -== Importing Contrib Modules - -include::example/extending/imports/6-contrib-import.adoc[] diff --git a/docs/modules/ROOT/pages/Cross_Builds.adoc b/docs/modules/ROOT/pages/Cross_Builds.adoc deleted file mode 100644 index 12d261af025..00000000000 --- a/docs/modules/ROOT/pages/Cross_Builds.adoc +++ /dev/null @@ -1,51 +0,0 @@ -= Cross Builds - -Cross-building refers to taking the same sources and configuration and building -it multiple times with minor changes. This could mean taking the same Scala -codebase and building it across multiple Scala versions, taking the same -application and building twice for dev/release, or taking the same module -config and building it across a variety of source folders. 
- -== Simple Cross Modules - -include::example/depth/cross/1-simple.adoc[] - -== Default Cross Modules - -include::example/depth/cross/11-default-cross-module.adoc[] - -== Cross Modules Source Paths - -include::example/depth/cross/2-cross-source-path.adoc[] - -== Using Cross Modules from Outside Targets - -include::example/depth/cross/3-outside-dependency.adoc[] - -== Using Cross Modules from other Cross Modules - -include::example/depth/cross/4-cross-dependencies.adoc[] - -== Multiple Cross Axes - -include::example/depth/cross/5-multiple-cross-axes.adoc[] - -== Extending Cross Modules - -include::example/depth/cross/6-axes-extension.adoc[] - -== Inner Cross Modules - -include::example/depth/cross/7-inner-cross-module.adoc[] - -== Cross Resolvers - -include::example/depth/cross/8-resolvers.adoc[] - -== Dynamic Cross Modules - -include::example/depth/cross/9-dynamic-cross-modules.adoc[] - -== Use Case: Static Blog - -include::example/depth/cross/10-static-blog.adoc[] diff --git a/docs/modules/ROOT/pages/Import_File_And_Import_Ivy.adoc b/docs/modules/ROOT/pages/Import_File_And_Import_Ivy.adoc deleted file mode 100644 index 0deaecd7767..00000000000 --- a/docs/modules/ROOT/pages/Import_File_And_Import_Ivy.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= import $file and import $ivy - -include::example/extending/imports/3-import-ivy.adoc[] - diff --git a/docs/modules/ROOT/pages/Java_Build_Examples.adoc b/docs/modules/ROOT/pages/Java_Build_Examples.adoc deleted file mode 100644 index 88d48316d05..00000000000 --- a/docs/modules/ROOT/pages/Java_Build_Examples.adoc +++ /dev/null @@ -1,73 +0,0 @@ -= Java Build Examples - -++++ - -++++ - -On this page, we will explore the Mill build tool via a series of simple Java -example projects. Each project demonstrates one particular feature of the Mill -build tool, and is also an executable codebase you can download and run. 
By the -end of this page, you will be familiar with how to configure Mill to work with -realistic Java codebases: cross-building, testing, and publishing them. - - -Many of the APIs covered here are listed in the API documentation: - -* {mill-doc-url}/api/latest/mill/main/RootModule.html[`mill.scalalib.RootModule`] -* {mill-doc-url}/api/latest/mill/scalalib/TestModule$.html[`mill.scalalib.TestModule`] -* {mill-doc-url}/api/latest/mill/scalalib/PublishModule.html[`mill.scalalib.PublishModule`] -* {mill-doc-url}/api/latest/mill/scalalib/MavenModule.html[`mill.scalalib.MavenModule`] -* {mill-doc-url}/api/latest/mill/scalalib/JavaModule.html[`mill.scalalib.JavaModule`] - -== Common Configuration Overrides - -include::example/javalib/builds/1-common-config.adoc[] - -== Custom Tasks - -include::example/javalib/builds/2-custom-tasks.adoc[] - -== Overriding Tasks - -include::example/javalib/builds/3-override-tasks.adoc[] - -== Nesting Modules - -include::example/javalib/builds/4-nested-modules.adoc[] - -== Publish Module - -include::example/javalib/builds/6-publish-module.adoc[] - - -== Maven-Compatible Modules - -include::example/javalib/builds/8-compat-modules.adoc[] - - -== Realistic Java Example Project - -include::example/javalib/builds/9-realistic.adoc[] - - -== Example Builds for Real Projects - -Mill comes bundled with example builds for real-world open-source projects, -demonstrating how Mill can be used to build code outside of tiny example codebases: - -=== JimFS - -include::example/thirdparty/jimfs.adoc[] - -=== Apache Commons IO - -include::example/thirdparty/commons-io.adoc[] - -== Real World Mill Builds - -=== C3P0 - -https://github.com/swaldman/c3p0[C3P0] is a JDBC connection pooling library -written in Java, built using the Mill build tool. 
diff --git a/docs/modules/ROOT/pages/Java_Builtin_Commands.adoc b/docs/modules/ROOT/pages/Java_Builtin_Commands.adoc deleted file mode 100644 index 28b04476ac3..00000000000 --- a/docs/modules/ROOT/pages/Java_Builtin_Commands.adoc +++ /dev/null @@ -1,12 +0,0 @@ -= Built-in Commands - -Mill comes with a number of useful commands out of the box. These are listed -in the Scaladoc: - -* {mill-doc-url}/api/latest/mill/main/MainModule.html[mill.main.MainModule] - -Mill's built-in commands are typically not directly related to building your -application code, but instead are utilities that help you understand and work -with your Mill build. - -include::example/javalib/basic/4-builtin-commands.adoc[] diff --git a/docs/modules/ROOT/pages/Java_Installation_IDE_Support.adoc b/docs/modules/ROOT/pages/Java_Installation_IDE_Support.adoc deleted file mode 100644 index 951ea10b6bf..00000000000 --- a/docs/modules/ROOT/pages/Java_Installation_IDE_Support.adoc +++ /dev/null @@ -1,3 +0,0 @@ -= Installation and IDE Support - -include::partial$Installation_IDE_Support.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Java_Intro_to_Mill.adoc b/docs/modules/ROOT/pages/Java_Intro_to_Mill.adoc deleted file mode 100644 index 6bfc0792c63..00000000000 --- a/docs/modules/ROOT/pages/Java_Intro_to_Mill.adoc +++ /dev/null @@ -1,105 +0,0 @@ - -// Author Notes: -// -// This is the first page a user is expected to land on when learning about -// Mill. It is designed to be a quick, broad overview to get someone started: -// what is Mill, why should they care, and what some simple Mill builds look -// like and how to use them. We intentionally touch shallowly on a lot of -// topics without giving them a proper discussion, since the other pages have -// plenty of space to go in-depth. 
-// -// By the end of this page, a prospective Mill user should be familiar with -// what Mill is, hopefully have downloaded an example to try out, and be -// interested in learning more about the Mill build tool - -= Introduction to Mill for Java - -++++ - -++++ - -:page-aliases: index.adoc, Intro_to_Mill.adoc, Intro_to_Mill_for_Java.adoc - -:language: Java - -include::partial$Intro_to_Mill_Header.adoc[] - -Mill is used to build some real-world Java projects, such as the -https://github.com/swaldman/c3p0[C3P0 JDBC Connection Pool], and -can be used for applications built on top of common Java frameworks like -xref:Java_Web_Examples.adoc#_spring_boot_todomvc_app[Spring Boot] or -xref:Java_Web_Examples.adoc#_micronaut_todomvc_app[Micronaut]. - -Mill borrows ideas from other tools like https://maven.apache.org/[Maven], -https://gradle.org/[Gradle], https://bazel.build/[Bazel], but tries to learn from the -strengths of each tool and improve on their weaknesses. Although Maven and Gradle -are mature widely-used tools, they have fundamental limitations in their design -(https://blog.ltgt.net/maven-is-broken-by-design/[Maven Design], -https://www.bruceeckel.com/2021/01/02/the-problem-with-gradle/[Gradle Design]) that make -them difficult to improve upon incrementally. - -xref:Case_Study_Mill_vs_Maven.adoc[Compared to Maven]: - -* **Mill follows Maven's innovation of good built-in defaults**: Mill's built-in - ``JavaModule``s follow Maven's "convention over configuration" style. Small mill - projects require minimal effort to get started, and larger Mill projects have a consistent - structure building on these defaults. - -* **Mill makes customizing the build tool much easier than Maven**. Projects usually - grow beyond just compiling a single language: needing custom - code generation, linting workflows, output artifacts, or support for - additional languages. 
Mill makes doing this yourself easy, so you are not beholden - to third-party plugins that may not exist, be well maintained, or interact well with each other. - -* **Mill automatically caches and parallelizes your build**: Not just the - built-in tasks that Mill ships with, but also any custom tasks or modules. - This maximizes performance and snappiness of - your command-line build workflows, and especially matters in larger codebases where builds - tend to get slow: a Maven `clean install` taking over a minute might take just a - few seconds in Mill. - -xref:Case_Study_Mill_vs_Gradle.adoc[Compared to Gradle]: - -* **Mill follows Gradle's conciseness**: Rather than pages and pages of verbose XML, every - line in a Mill build is meaningful. e.g. adding a dependency is 1 line in - Mill, like it is in Gradle, and unlike the 5 line `` declaration you find in Maven. - Skimming and understanding a 100-line Mill `build.mill` file is - often much easier than skimming the equivalent 500-line Maven `pom.xml`. - -* **Mill builds more performant**: Although both Mill and Gradle automatically cache and - parallelize your build, Mill does so with much less fixed overhead, resulting in 2-3x - speedups in common command-line workflows. This means less time waiting for your build - tool, and more time focusing on the things that really matter to your project. - -* **Mill enforces best practices by default**. All Mill tasks are cached by default, even - custom tasks. All Mill tasks write their output to disk xref:Out_Dir.adoc[a - standard place]. All task inter-dependencies are automatically captured, without - needing manual annotation. All Mill builds are incremental, not just tasks but also - xref:The_Mill_Evaluation_Model.adoc#_caching_at_each_layer_of_the_evaluation_model[configuration - and other phases]. 
Where Gradle requires considerable - https://docs.gradle.org/current/userguide/incremental_build.html[effort and expertise] - to maintain your build, Mill automates it so the - easiest thing to do is almost always the right thing to do. - -Mill build files are written in Scala, but you do not need to have prior experience -in Scala to read or write them. Like Gradle Groovy or Maven XML, it's easy to learn -enough Scala for Mill without needing to become an expert in the language. - - -include::partial$Intro_to_Mill_BlogVideo.adoc[] - -== Simple Java Module - -include::example/javalib/basic/1-simple.adoc[] - -== Custom Build Logic - -include::example/javalib/basic/2-custom-build-logic.adoc[] - -== Multi-Module Project - -include::example/javalib/basic/3-multi-module.adoc[] - -include::partial$Intro_to_Mill_Footer.adoc[] diff --git a/docs/modules/ROOT/pages/Java_Module_Config.adoc b/docs/modules/ROOT/pages/Java_Module_Config.adoc deleted file mode 100644 index f738f793134..00000000000 --- a/docs/modules/ROOT/pages/Java_Module_Config.adoc +++ /dev/null @@ -1,90 +0,0 @@ -= Java Module Configuration - -++++ - -++++ - -This page goes into more detail about the various configuration options -for `JavaModule`. 
- -Many of the APIs covered here are listed in the API documentation: - -* {mill-doc-url}/api/latest/mill/scalalib/JavaModule.html[mill.javaalib.JavaModule] - - -== Compilation & Execution Flags - -include::example/javalib/module/1-compilation-execution-flags.adoc[] - -== Adding Ivy Dependencies - -include::example/javalib/module/2-ivy-deps.adoc[] - -== Runtime and Compile-time Dependencies - -include::example/javalib/module/3-run-compile-deps.adoc[] - -== Classpath and Filesystem Resources - -include::example/javalib/module/5-resources.adoc[] - -== Annotation Processors - -include::example/javalib/module/6-annotation-processors.adoc[] - -== Javadoc Config - -include::example/javalib/module/7-docjar.adoc[] - -== Unmanaged Jars - -include::example/javalib/module/8-unmanaged-jars.adoc[] - -== Specifying the Main Class - -include::example/javalib/module/9-main-class.adoc[] - -== Downloading Non-Maven Jars - -include::example/javalib/module/10-downloading-non-maven-jars.adoc[] - -== Customizing the Assembly - -include::example/javalib/module/11-assembly-config.adoc[] - -== Repository Config - -include::example/javalib/module/12-repository-config.adoc[] - -=== Maven Central: Blocked! - -Under some circumstances (e.g. corporate firewalls), you may not have access maven central. 
The typical symptom will be error messages which look like this; - ----- -1 targets failed -mill.scalalib.ZincWorkerModule.classpath -Resolution failed for 1 modules: --------------------------------------------- - com.lihaoyi:mill-scalalib-worker_2.13:0.11.1 - not found: C:\Users\partens\.ivy2\local\com.lihaoyi\mill-scalalib-worker_2.13\0.11.1\ivys\ivy.xml - download error: Caught java.io.IOException (Server returned HTTP response code: 503 for URL: https://repo1.maven.org/maven2/com/lihaoyi/mill-scalalib-worker_2.13/0.11.1/mill-scalalib-worker_2.13-0.11.1.pom) while downloading https://repo1.maven.org/maven2/com/lihaoyi/mill-scalalib-worker_2.13/0.11.1/mill-scalalib-worker_2.13-0.11.1.pom ----- - -It is expected that basic commands (e.g. clean) will not work, as Mill saying it is unable to resolve it's own, fundamental, dependencies. Under such circumstances, you will normally have access to some proxy, or other corporate repository which resolves maven artefacts. The strategy is simply to tell mill to use that instead. - -The idea is to set an environment variable COURSIER_REPOSITORIES (see coursier docs). The below command should set the environment variable for the current shell, and then run the mill command. - ----- - COURSIER_REPOSITORIES=https://packages.corp.com/artifactory/maven/ mill resolve _ ----- - -If you are using millw, a more permanent solution could be to set the environment variable at the top of the millw script, or as a user environment variable etc. - - - -== Native C Code with JNI - -include::example/javalib/module/13-jni.adoc[] - diff --git a/docs/modules/ROOT/pages/Java_Web_Examples.adoc b/docs/modules/ROOT/pages/Java_Web_Examples.adoc deleted file mode 100644 index 8125e5279e4..00000000000 --- a/docs/modules/ROOT/pages/Java_Web_Examples.adoc +++ /dev/null @@ -1,31 +0,0 @@ -= Java Web Examples - -++++ - -++++ - -This page contains examples of using Mill as a build tool for web-applications. 
-It covers setting up a basic backend server with a variety of server frameworks - -== Jetty Hello World App - -include::example/javalib/web/1-hello-jetty.adoc[] - - -== Spring Boot Hello World App - -include::example/javalib/web/2-hello-spring-boot.adoc[] - -== Spring Boot TodoMvc App - -include::example/javalib/web/3-todo-spring-boot.adoc[] - -== Micronaut Hello World App - -include::example/javalib/web/4-hello-micronaut.adoc[] - -== Micronaut TodoMvc App - -include::example/javalib/web/5-todo-micronaut.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Library_Dependencies.adoc b/docs/modules/ROOT/pages/Library_Dependencies.adoc deleted file mode 100644 index cff5e929e8a..00000000000 --- a/docs/modules/ROOT/pages/Library_Dependencies.adoc +++ /dev/null @@ -1,289 +0,0 @@ -= Library Dependencies in Mill -:link-coursier: https://github.com/coursier/coursier -:link-coursier-doc: https://get-coursier.io/docs/overview - -Beside the dependencies between Mill modules, most non-trivial source projects have dependencies to other libraries. - -Mill uses {link-coursier}[coursier] to resolve and download dependencies. -Once downloaded, they are located in the coursier specific cache locations. -For more details about coursier, refer to the {link-coursier-doc}[coursier documentation]. - -== Dependencies in General - -Mill dependencies have the simple form: - ----- -ivy"{organization}:{name}:{version}" ----- - -Additional attributes are also supported: - ----- -ivy"{organization}:{name}:{version}[;{attribute}={value}]*" ----- - -When working in other Java and Scala projects, you will find some synonyms, which typically all mean the same. - -For example in the Maven ecosystem, the `organization` is called the `group` and the `name` is called the `artifact`. -The whole tripplet is ofthe called `GAV`. - -In Mill we use the additional term `artifactId` which is identical to the `name` when used in the normal form shown above. 
-When a different form is used, e.g. some double-colons are used between the parts, the `artifactId` typically contains suffixes, but the name doesn't. - -.Example for a simple Java Dependency -[source,scala] ----- -def ivyDeps = Agg( - ivy"org.slf4j:slf4j-api:1.7.25" -) ----- - -== Scala dependencies - -Scala major releases up until version `2.13` are binary incompatible. -That means, mixing dependencies of different binary platforms will result in non-working runtimes and obscure and hard to debug issues. - -To easily pick only a compatible version, a convention was established to append the scala major version as a suffix to the package name.footnote:[ -Scala 2 versions have the unusual version format: `{epoch}.{major}.{minor}`.] -E.g. to select the Scala 2.13 version of a library `foo`, the final `artifactId` will contain the additional suffix `_2.13`, such that the final `artifactId` is `foo_2.13`. - -To always pick the right version and support cross compiling, -you can omit the scala version and instead use a double colon (`::`) between the `organization` and the `name`, e.g. `ivy"com.typesafe.akka:akka-actor_2.12:2.5.25"`. -Your module needs to `extends ScalaModule` though. - -If you want to use dependencies that are cross-published against the full Scala version, e.g. `2.12.12`, -you can use three colons (`:::`) between `organization` and `name`, e.g.: `ivy"org.scalamacros:::paradise:2.1.1"`. - -.Example -[source,scala] ----- -def ivyDeps = Agg( - // explicit scala version suffix, NOT RECOMMENDED! - ivy"com.typesafe.akka:akka-actor_2.12:2.5.25", - ivy"com.typesafe.akka::akka-actor:2.5.25", - ivy"org.scalamacros:::paradise:2.1.1" -) ----- - -== Scala 3 interoperability - -Since the release of Scala 3, the binary compatibility story for Scala has changed. -That means, Scala 3 dependencies can be mixed with Scala 2.13 dependencies. -In fact, the Scala 3 standard library is the same as for Scala 2.13. 
- - -[CAUTION] --- -As Scala 3 and Scala 2.13 have different binary platforms, but their artifacts are in general compatible, this introduces new challenges. - -There is currently no mechanism, that impedes to bring the same dependency twice into the classpath (one for Scala 2.13 and one for Scala 3). --- - - -=== Using Scala 2.13 from Scala 3 - -If your Scala version is a Scala 3.x, but you want to use the Scala 2.13 version of a specific library, you can use the `.withDottyCompat` method on that dependency. - -.Example: -[source,scala] ----- -def scalaVersion = "3.2.1" -def ivyDeps = Agg( - ivy"com.lihaoyi::upickle:2.0.0".withDottyCompat(scalaVersion()) //1 -) ----- -<1> This will result in a Scala 2.13 dependency `com.lihaoyi::upicke_2.13:2.0.0` - - -[NOTE] --- -Do you wonder where the name "dotty" comes from? - -In the early development of Scala 3, the Scala 3 compiler was called "Dotty". Later, the name was changed to Scala 3, but the compiler project itself is still named "dotty". - -The dotty compiler itself is an implementation of the http://lampwww.epfl.ch/~amin/dot/fool.pdf[Dependent Object Types (DOT) calculus], which is the new basis of Scala 3. It also enhances the type system to a next level and allows features like union-types and intersection-types. --- - -== Test dependencies (there is no `test` scope) - -One difference between Mill and other build tools like sbt or Maven is the fact, that tests are ordinary submodules on their own. -For convenience, most modules already come with a pre-configured trait for a test submodule, -which already inherits all dependencies of their parent module. -If you need additional test dependencies, you simply add them by overriding `def ivyDeps`, as you would do with normal library dependencies. - -When migrating a sbt project and seeing a dependency like this: `"ch.qos.logback" % "logback-classic" % "1.2.3" % "test"`, -simply add it to the test module's `ivyDeps` as ordinary dependency. 
-There is no special test scope in Mill. - -.Example -[source,scala] ----- -object main extends JavaModule { - object test extends JavaModuleTests { - def ivyDeps = Agg( - ivy"org.qos.logback:logback-classic:1.2.3" - ) - } -} ----- - -== Compile-only dependencies (`provided` scope) - -If you want to use a dependency only at compile time, you can declare it with the `compileIvyDeps` target. - -.Example -[source,scala] ----- -def compileIvyDeps = Agg( - ivy"org.slf4j:slf4j-api:1.7.25" -) ----- - -When Mill generated file to interact with package manager like `pom.xml` for Maven repositories, such compile-only dependencies are mapped to the `provided` scope. - -Please note, that dependencies with `provided` scope will never be resolved transitively. Hence, the name "provided", as the target runtime needs to "provide" them, if they are needed. - - -== Runtime dependencies - -If you want to declare dependencies to be used at runtime (but not at compile time), you can use the `runIvyDeps` targets. - -.Example -[source,scala] ----- -def runIvyDeps = Agg( - ivy"ch.qos.logback:logback-classic:1.2.0" -) ----- - -It is also possible to use a higher version of the same library dependencies already defined in `ivyDeps`, to ensure you compile against a minimal API version, but actually run with the latest available version. - -== Detecting transitive dependencies - -To render a tree of dependencies (transitive included) you can run `mill myModule.ivyDepsTree`. Here is how the start of `./mill __.ivyDepsTree` looks like in the `mill` project itself: - -[source,text] ----- -├─ ch.epfl.scala:bsp4j:2.1.0-M3 -│ ├─ org.eclipse.lsp4j:org.eclipse.lsp4j.generator:0.12.0 -│ │ ├─ org.eclipse.lsp4j:org.eclipse.lsp4j.jsonrpc:0.12.0 -│ │ │ └─ com.google.code.gson:gson:2.9.1 -│ │ └─ org.eclipse.xtend:org.eclipse.xtend.lib:2.24.0 -│ │ ├─ org.eclipse.xtend:org.eclipse.xtend.lib.macro:2.24.0 -│ │ │ └─ org.eclipse.xtext:org.eclipse.xtext.xbase.lib:2.24.0 -... 
-│ │ ├─ com.lihaoyi:fastparse_2.13:2.3.0 -│ │ │ ├─ com.lihaoyi:geny_2.13:0.6.0 -> 0.7.1 (possible incompatibility) -│ │ │ │ └─ org.scala-lang:scala-library:2.13.10 -│ │ │ └─ com.lihaoyi:sourcecode_2.13:0.2.1 -> 0.3.0 (possible incompatibility) ----- - -After compiling your module(s) you can find and examine files such as `ivyDeps.json` and `transitiveIvyDeps.json` in your `out` build's folder for a given module. -After running the `ivyDepsTree` command you'll also find the `ivyDepsTree.json` and `ivyDepsTree.log` file that contain the output of the above `ivyDepsTree` command. - -You can observe the actual version being used by running `mill show myModule.resolvedIvyDeps`. If you run `mill myModule.resolvedIvyDeps`, the same information is available in `out/myModule/resolvedIvyDeps.json`. - -=== Figuring out where a dependency comes from - -There will be times when you want to figure out where a dependency is coming -from. The output of `ivyDepsTree` can be quite large in larger projects so the -command provides a nice utility to be able to target the part of the tree that -brings in a specific dependency. - -For example, let's again use the Mill codebase as an example. We'll search the -tree in the `main` module and try to find where the `jsoniter-scala-core_2.13` -artifact is coming from using the `--whatDependsOn` argument: - -[source,txt] ----- -❯ ./mill -i dev.run ~/Documents/scala-workspace/com-lihaoyi/mill main.ivyDepsTree --whatDependsOn com.github.plokhotnyuk.jsoniter-scala:jsoniter-scala-core_2.13 -[33/33] main.ivyDepsTree -└─ com.github.plokhotnyuk.jsoniter-scala:jsoniter-scala-core_2.13:2.13.5 - ├─ io.get-coursier:coursier_2.13:2.1.0-RC1 - └─ org.virtuslab.scala-cli:config_2.13:0.1.16 - └─ io.get-coursier:coursier-cache_2.13:2.1.0-RC1 - └─ io.get-coursier:coursier_2.13:2.1.0-RC1 ----- - -By looking at the output we can see that it's our dependency on `coursier_2.13` -that is bringining in the `jsoniter-scala-core_2.13` artifact. 
- -The `--whatDependsOn` argument can also be repeated to target multiple -artifacts at once. Just repeat the `--whatDependsOn ` pattern. Note -that the artifact pattern follows the `org:artifact` convention. You can't -include a version as the utility will show you all usages of the `artifact`. -Also note that when using `--whatDependsOn` on usage of `--inverse` is forced -in order to make the tree appear in an inverted manner to more easily show you -where the dependency is coming from. - -== Excluding transitive dependencies - -You can use the `.exclude` method on a dependency. It accepts `organization` and `name` tuples, to be excluded. -Use the special name `*` to match all ``organization``s or ``name``s. - -.Example: Exclude `fansi_2.12` library from transitive dependency set of `pprint`. -[source,scala] ----- -def deps = Agg( - ivy"com.lihaoyi::pprint:0.5.3".exclude("com.lihaoyi" -> "fansi_2.12") -) ----- - -You can also use `.excludeOrg` or `excludeName`: - -There is also a short notation available: - -.Example: Shot notation to exclude `fansi_2.12` library from transitive dependency set of `pprint`. -[source,scala] ----- -def deps = Agg( - ivy"com.lihaoyi::pprint:0.5.3;exclude=com.lihaoyi:fansi_2.12" -) ----- - -.Example: Exclude all `com.lihaoyi` libraries from transitive dependency set of `pprint`. -[source,scala] ----- -val deps = Agg(ivy"com.lihaoyi::pprint:0.5.3".excludeOrg("com.lihaoyi")) ----- - -Note: You can chain multiple exclusions with `exclude`, `excludeOrg`, and `excludeName`. - -.Example: Excluding a library (fansi) by name from transitive dependency set of `pprint`. -[source,scala] ----- -val deps = Agg( - ivy"com.lihaoyi::pprint:0.5.3" - .excludeName("fansi_2.12") - .excludeName("sourcecode") -) ----- - -== Forcing versions - -CAUTION: Please treat forceVersion as experimental; it has some bugs and isn't production-ready (forced versions https://github.com/com-lihaoyi/mill/issues/1975[aren't propagated to published artifacts]). 
- -You can use the `forceVersion` method to ensure the used version of a dependency is what you have declared. - -* You declare a dependency `val deps = Agg(ivy"com.lihaoyi::fansi:0.2.14")` -* There is another dependency, `val deps = Agg(ivy"com.lihaoyi::PPrint:0.8.1")` -* PPrint 0.8.1 uses fansi 0.4.0, so it is a transitive dependency -* `mill show myModule.resolvedIvyDeps | grep "fansi"` should show fansi 0.4.0 -* If you want to force to the older version (to prevent it being evicted, and replaced by 0.4.0) then you can use `val deps = Agg(ivy"com.lihaoyi::fansi:0.2.14".forceVersion())` -* `mill show myModule.resolvedIvyDeps | grep "fansi"` should show fansi 0.2.14 - -== ScalaJS dependencies - -Scala.js introduces an additional binary platform axis. -To the already required Scala version, there comes the Scala.js version. - -You can use two colons (`::`) between `name` and `version` to define a Scala.js dependency. -Your module needs to `extends ScalaJSModule` to accept Scala.js dependencies. - -== Scala Native dependencies - -Scala Native introduces an additional binary platform axis. -To the already required Scala version, there comes the Scala Native version. - -You can use two colons (`::`) between `name` and `version` to define a Scala Native dependency. -Your module needs to `extends ScalaNativeModule` to accept Scala Native dependencies. 
diff --git a/docs/modules/ROOT/pages/Linting_Java_Projects.adoc b/docs/modules/ROOT/pages/Linting_Java_Projects.adoc deleted file mode 100644 index b9d391bdb3f..00000000000 --- a/docs/modules/ROOT/pages/Linting_Java_Projects.adoc +++ /dev/null @@ -1,24 +0,0 @@ -= Linting Java Projects - -++++ - -++++ - -This page will discuss common topics around working with test suites using the Mill build tool - -== ErrorProne - -include::example/javalib/linting/1-error-prone.adoc[] - -== Checkstyle - -include::example/javalib/linting/2-checkstyle.adoc[] - -== Jacoco Code Coverage - -Mill supports Java code coverage analysis via the mill-jacoco plugin. See the -plugin repository documentation for more details: - -* https://github.com/lefou/mill-jacoco \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Mill_Design_Principles.adoc b/docs/modules/ROOT/pages/Mill_Design_Principles.adoc deleted file mode 100644 index a5da56adcc5..00000000000 --- a/docs/modules/ROOT/pages/Mill_Design_Principles.adoc +++ /dev/null @@ -1,437 +0,0 @@ -= Mill Design Principles -:page-aliases: Mill_Internals.adoc - - -The following external resources give more of a flavor of the architecture behind -Mill: - -* https://www.lihaoyi.com/post/SoWhatsSoSpecialAboutTheMillScalaBuildTool.html[So, What's So Special About The Mill Scala Build Tool?] -* https://www.youtube.com/watch?v=j6uThGxx-18[Video: Mill a Build Tool based on Pure Functional Programming] -* http://www.lihaoyi.com/post/SowhatswrongwithSBT.html[Blog Post: So, what's wrong with SBT?] -* http://www.lihaoyi.com/post/BuildToolsasPureFunctionalPrograms.html[Blog Post: Build Tools as Pure Functional Programs] - -== Principles - -=== Dependency graph first - -Mill's most important abstraction is the dependency graph of ``Task``s. -Constructed using the `T {...}` `T.task {...}` `T.command {...}` syntax, these -track the dependencies between steps of a build, so those steps can be executed -in the correct order, queried, or parallelized. 
- -While Mill provides helpers like `ScalaModule` and other things you can use to -quickly instantiate a bunch of related tasks (resolve dependencies, find -sources, compile, package into jar, ...) these are secondary. When Mill -executes, the dependency graph is what matters: any other mode of organization -(hierarchies, modules, inheritance, etc.) is only important to create this -dependency graph of ``Task``s. - -=== Builds are hierarchical - -The syntax for running targets from the command line `mill Foo.bar.baz` is -the same as referencing a target in Scala code, `Foo.bar.baz` - -Everything that you can run from the command line lives in an object hierarchy -in your `build.mill` file. Different parts of the hierarchy can have different -``Target``s available: just add a new `def foo = T {...}` somewhere and you'll be -able to run it. - -Cross builds, using the `Cross` data structure, are just another kind of node in -the object hierarchy. The only difference is syntax: from the command line you'd -run something via `mill core.cross[a].printIt` while from code you use -`core.cross("a").printIt` due to different restrictions in Scala/Bash syntax. - -=== Caching by default - -Every `Target` in a build, defined by `def foo = T {...}`, is cached by default. -Currently this is done using a `foo.json` file in the `out/` folder. The -`Target` is also provided a `foo.dest/` path on the filesystem dedicated to it, for -it to store output files etc. - -This happens whether you want it to or not. Every `Target` is cached, not just -the "slow" ones like `compile` or `assembly`. - -Caching is keyed on the `.hashCode` of the returned value. For ``Target``s -returning the contents of a file/folder on disk, they return `PathRef` instances -whose hashcode is based on the hash of the disk contents. Serialization of the -returned values is done using uPickle. 
- -=== Functional Purity - -Mill relies heavily on build targets being "pure": they only depend on their -input targets, and their only output is their return value. They do not -scribble all over the filesystem, reading and writing from random places. That -is what allows us to be aggressive about caching and parallelizing the -evaluation of build targets during a build. - -Many kinds of build steps do require files on disk, and for that Mill provides -the `T.dest` folder. This is a folder on disk dedicated to each build target, -so that it can read and write things to it without worrying about conflicts -with other targets that have their own `T.dest` folders. In effect, this makes -even file output "pure": we can know precisely where a target's output files -live when we need to invalidate them, and it allows multiple targets all -reading and writing to the filesystem to do so safely even when in parallel. - -=== Short-lived build processes - -The Mill build process is meant to be run over and over, not only as a -long-lived daemon/console. That means we must minimize the startup time of the -process, and that a new process must be able to re-construct the in-memory data -structures where a previous process left off, in order to continue the build. - -Re-construction is done via the hierarchical nature of the build: each `Target` -`foo.bar.baz` has a fixed position in the build hierarchy, and thus a fixed -position on disk `out/foo/bar/baz.json`. When the old process dies and a -new process starts, there will be a new instance of `Target` with the same -implementation code and same position in the build hierarchy: this new `Target` -can then load the `out/foo/bar/baz.json` file and pick up where the -previous process left off. - -Minimizing startup time means aggressive caching, as well as minimizing the -total amount of bytecode used: Mill's current 1-2s startup time is dominated by -JVM classloading. 
By default Mill uses a long-lived compile server to speed -things up even more, but ensuring that the "from scratch" performance remains -good is a core ongoing requirement. - -=== Static dependency graph and Applicative tasks - -``Task``s are _Applicative_, not _Monadic_. There is `.map`, `.zip`, but no -`.flatMap` operation. That means that we can know the structure of the entire -dependency graph before we start executing ``Task``s. This lets us perform all -sorts of useful operations on the graph before running it: - -* Given a Target the user wants to run, pre-compute and display what targets - will be evaluated ("dry run"), without running them - -* Automatically parallelize different parts of the dependency graph that do not - depend on each other, perhaps even distributing it to different worker - machines like Bazel/Pants can - -* Visualize the dependency graph easily, e.g. by dumping to a DOT file - -* Query the graph, e.g. "why does this thing depend on that other thing?" - -* Avoid running tasks "halfway": if a Target's upstream Targets fail, we can - skip the Target completely rather than running halfway and then bailing out - with an exception - -In order to avoid making people using `.map` and `.zip` all over the place when -defining their ``Task``s, we use the `T {...}`/`T.task {...}`/`T.command {...}` -macros which allow you to use `Task#apply()` within the block to "extract" a -value. 
- -[source,scala] ----- -def test() = T.command { - TestRunner.apply( - "mill.UTestFramework", - runDepClasspath().map(_.path) :+ compile().path, - Seq(compile().path) - -} - ----- - -This is roughly equivalent to the following: - -[source,scala] ----- -def test() = T.command { T.zipMap(runDepClasspath, compile, compile) { - (runDepClasspath1, compile2, compile3) => - TestRunner.apply( - "mill.UTestFramework", - runDepClasspath1.map(_.path) :+ compile2.path, - Seq(compile3.path) - ) -} - ----- - -This is similar to SBT's `:=`/`.value` macros, or ``scala-async``'s -`async`/`await`. Like those, the `T {...}` macro should let users program most of -their code in a "direct" style and have it "automatically" lifted into a graph -of ``Task``s. - -== How Mill aims for Simple - -Why should you expect that the Mill build tool can achieve simple, easy & -flexible, where other build tools in the past have failed? - -Build tools inherently encompass a huge number of different concepts: - -* What "Tasks" depends on what? -* How do I define my own tasks? -* Where do source files come from? -* What needs to run in what order to do what I want? -* What can be parallelized and what can't? -* How do tasks pass data to each other? What data do they pass? -* What tasks are cached? Where? -* How are tasks run from the command line? -* How do you deal with the repetition inherent in a build? (e.g. compile, run & - test tasks for every "module") -* What is a "Module"? How do they relate to "Tasks"? -* How do you configure a Module to do something different? -* How are cross-builds (across different configurations) handled? - -These are a lot of questions to answer, and we haven't even started talking -about the actually compiling/running any code yet! If each such facet of a build -was modelled separately, it's easy to have an explosion of different concepts -that would make a build tool hard to understand. 
- -Before you continue, take a moment to think: how would you answer to each of -those questions using an existing build tool you are familiar with? Different -tools like http://www.scala-sbt.org/[SBT], -https://fake.build/legacy-index.html[Fake], https://gradle.org/[Gradle] or -https://gruntjs.com/[Grunt] have very different answers. - -Mill aims to provide the answer to these questions using as few, as familiar -core concepts as possible. The entire Mill build is oriented around a few -concepts: - -* The Object Hierarchy -* The Call Graph -* Instantiating Traits & Classes - -These concepts are already familiar to anyone experienced in Scala (or any other -programming language…), but are enough to answer all of the complicated -build-related questions listed above. - -=== The Object Hierarchy - -[graphviz] -.... -digraph G { - node [shape=box width=0 height=0 style=filled fillcolor=white] - bgcolor=transparent - "root-module" [style=dashed] - foo1 [style=dashed] - foo2 [style=dashed] - "root-module" -> foo1 -> "foo1.bar" [style=dashed] - foo1 -> "foo1.qux" [style=dashed] - "root-module" -> foo2 -> "foo2.bar" [style=dashed] - foo2 -> "foo2.qux" [style=dashed] - foo2 -> "foo2.baz" [style=dashed] -} -.... - -The module hierarchy is the graph of objects, starting from the root of the -`build.mill` file, that extend `mill.Module`. At the leaves of the hierarchy are -the ``Target``s you can run. - -A ``Target``'s position in the module hierarchy tells you many things. 
For -example, a `Target` at position `core.test.compile` would: - -* Cache output metadata at `out/core/test/compile.json` - -* Output files to the folder `out/core/test/compile.dest/` - -* Source files default to a folder in `core/test/`, `core/test/src/` - -* Be runnable from the command-line via `mill core.test.compile` - -* Be referenced programmatically (from other ``Target``s) via `core.test.compile` - -From the position of any `Target` within the object hierarchy, you immediately -know how to run it, find its output files, find any caches, or refer to it from -other ``Target``s. You know up-front where the ``Target``s data "lives" on disk, and -are sure that it will never clash with any other ``Target``s data. - -=== The Call Graph - -[graphviz] -.... -digraph G { - rankdir=LR - node [shape=box width=0 height=0 style=filled fillcolor=white] - bgcolor=transparent - newrank=true; - subgraph cluster_0 { - style=dashed - node [shape=box width=0 height=0 style=filled fillcolor=white] - label = "foo.bar"; - - "foo.bar.sources" -> "foo.bar.compile" -> "foo.bar.classPath" -> "foo.bar.assembly" - "foo.bar.mainClass" -> "foo.bar.assembly" - } - subgraph cluster_1 { - style=dashed - node [shape=box width=0 height=0 style=filled fillcolor=white] - label = "foo"; - - "foo.bar.classPath" -> "foo.compile" [constraint=false]; - "foo.bar.classPath" -> "foo.classPath" - "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" - "foo.mainClass" -> "foo.assembly" - } - subgraph cluster_2 { - style=dashed - node [shape=box width=0 height=0 style=filled fillcolor=white] - label = "qux"; - - "qux.mainClass" -> "qux.assembly" - "foo.classPath" -> "qux.compile" [constraint=false]; - "foo.classPath" -> "qux.classPath" - "qux.sources" -> "qux.compile" -> "qux.classPath" -> "qux.assembly" - } -} -.... - -The Scala call graph of "which target references which other target" is core to -how Mill operates. 
This graph is reified via the `T {...}` macro to make it -available to the Mill execution engine at runtime. The call graph tells you: - -* Which ``Target``s depend on which other ``Target``s - -* For a given `Target` to be built, what other ``Target``s need to be run and in - what order - -* Which ``Target``s can be evaluated in parallel - -* What source files need to be watched when using `--watch` on a given target (by - tracing the call graph up to the ``Source``s) - -* What a given `Target` makes available for other ``Target``s to depend on (via - its return value) - -* Defining your own task that depends on others is as simple as `def foo = - T {...}` - -The call graph within your Scala code is essentially a data-flow graph: by -defining a snippet of code: - -[source,scala] ----- -val b = ... -val c = ... -val d = ... -val a = f(b, c, d) ----- - -you are telling everyone that the value `a` depends on the values of `b` `c` and -`d`, processed by `f`. A build tool needs exactly the same data structure: -knowing what `Target` depends on what other ``Target``s, and what processing it -does on its inputs! - -With Mill, you can take the Scala call graph, wrap everything in the `T {...}` -macro, and get a `Target`-dependency graph that matches exactly the call-graph -you already had: - -[source,scala] ----- -def b = T { ... } -def c = T { ... } -def d = T { ... } -def a = T { f(b(), c(), d()) } ----- - -Thus, if you are familiar with how data flows through a normal Scala program, -you already know how data flows through a Mill build! The Mill build evaluation -may be incremental, it may cache things, it may read and write from disk, but -the fundamental syntax, and the data-flow that syntax represents, is unchanged -from your normal Scala code. 
- -=== Instantiating Traits & Classes - -Classes and traits are a common way of re-using common data structures in Scala: -if you have a bunch of fields which are related and you want to make multiple -copies of those fields, you put them in a class/trait and instantiate it over -and over. - -In Mill, inheriting from traits is the primary way for re-using common parts of -a build: - -* Scala "project"s with multiple related ``Target``s within them, are just a - `Trait` you instantiate - -* Replacing the default ``Target``s within a project, making them do new - things or depend on new ``Target``s, is simply `override`-ing them during - inheritance - -* Modifying the default ``Target``s within a project, making use of the old value - to compute the new value, is simply `override`ing them and using `super.foo()` - -* Required configuration parameters within a `project` are `abstract` members - -* Cross-builds are modelled as instantiating a (possibly anonymous) class - multiple times, each instance with its own distinct set of ``Target``s - -In normal Scala, you bundle up common fields & functionality into a `class` you -can instantiate over and over, and you can override the things you want to -customize. Similarly, in Mill, you bundle up common parts of a build into -``trait``s you can instantiate over and over, and you can override the things you -want to customize. "Subprojects", "cross-builds", and many other concepts are -reduced to simply instantiating a `trait` over and over, with tweaks. - -== Prior Work - -=== SBT - -Mill is built as a substitute for SBT, whose problems are -http://www.lihaoyi.com/post/SowhatswrongwithSBT.html[described here]. -Nevertheless, Mill takes on some parts of SBT (builds written in Scala, Task -graph with an Applicative "idiom bracket" macro) where it makes sense. - -=== Bazel - -Mill is largely inspired by https://bazel.build/[Bazel]. 
In particular, the -single-build-hierarchy, where every Target has an on-disk-cache/output-folder -according to their position in the hierarchy, comes from Bazel. - -Bazel is a bit odd in its own right. The underlying data model is good -(hierarchy + cached dependency graph) but getting there is hell. It (like SBT) is -also a 3-layer interpretation model, but layers 1 & 2 are almost exactly the -same: mutable python which performs global side effects (layer 3 is the same -dependency-graph evaluator as SBT/mill). - -You end up having to deal with a non-trivial python codebase where everything -happens via: - -[source,python] ----- -do_something(name="blah") ----- - -or - -[source,python] ----- -do_other_thing(dependencies=["blah"]) - ----- - -where `"blah"` is a global identifier that is often constructed programmatically -via string concatenation and passed around. This is quite challenging. - -Having the two layers be “just python” is great since people know python, but I -think it's unnecessary to have two layers ("evaluating macros" and "evaluating rule -impls") that are almost exactly the same, and I think making them interact via -return values rather than via a global namespace of programmatically-constructed -strings would make it easier to follow. - -With Mill, I’m trying to collapse Bazel’s Python layer 1 & 2 into just 1 layer -of Scala, and have it define its dependency graph/hierarchy by returning -values, rather than by calling global-side-effecting APIs. I've had trouble -trying to teach people how-to-bazel at work, and am pretty sure we can make -something that's easier to use. - -=== Scala.Rx - -Mill's "direct-style" applicative syntax is inspired by my old -https://github.com/lihaoyi/scala.rx[Scala.Rx] project. While there are -differences (Mill captures the dependency graph lexically using Macros, Scala.Rx -captures it at runtime), they are pretty similar. 
- -The end-goal is the same: to write code in a "direct style" and have it -automatically "lifted" into a dependency graph, which you can introspect and use -for incremental updates at runtime. - -Scala.Rx is itself build upon the 2010 paper -https://infoscience.epfl.ch/record/148043/files/DeprecatingObserversTR2010.pdf[Deprecating the Observer Pattern]. - -=== CBT - -Mill looks a lot like https://github.com/cvogt/cbt[CBT]. The inheritance based -model for customizing ``Module``s/``ScalaModule``s comes straight from there, as -does the "command line path matches Scala selector path" idea. Most other things -are different though: the reified dependency graph, the execution model, the -caching module all follow Bazel more than they do CBT diff --git a/docs/modules/ROOT/pages/Mill_Sandboxing.adoc b/docs/modules/ROOT/pages/Mill_Sandboxing.adoc deleted file mode 100644 index cfaa93d1c46..00000000000 --- a/docs/modules/ROOT/pages/Mill_Sandboxing.adoc +++ /dev/null @@ -1,24 +0,0 @@ -= Mill Sandboxing - -== Task Sandboxing - -include::example/depth/sandbox/1-task.adoc[] - -== Test Sandboxing - -include::example/depth/sandbox/2-test.adoc[] - -== Breaking Out Of Sandbox Folders - -include::example/depth/sandbox/3-breaking.adoc[] - -== Limitations - -Mill's approach to filesystem sandboxing is designed to avoid accidental interference -between different Mill tasks. It is not designed to block intentional misbehavior, and -tasks are always able to traverse the filesystem and do whatever they want. Furthermore, -Mill's redirection of `os.pwd` does not apply to `java.io` or `java.nio` APIs, which are -outside of Mill's control. - -However, by setting `os.pwd` to safe sandbox folders, we hope to minimize the cases where -someone accidentally causes issues with their build by doing the wrong thing. 
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/Modules.adoc b/docs/modules/ROOT/pages/Modules.adoc deleted file mode 100644 index 85d47209517..00000000000 --- a/docs/modules/ROOT/pages/Modules.adoc +++ /dev/null @@ -1,59 +0,0 @@ -= Modules - -`mill.Module` serves two main purposes: - -1. As ``object``s, they serve as namespaces that let you group related ``Task``s - together to keep things neat and organized. - -2. As ``trait``s, they are re-usable templates that let you replicate groups - of related ``Task``s and sub-``Module``s while allowing customizations - -Mill's comes with built in modules such as `mill.scalalib.ScalaModule` and -`mill.scalalib.CrossSbtModule`, but you can also define your own modules to do -things that are not built-in to Mill. - -include::example/depth/modules/7-modules.adoc[] - -== Use Case: DIY Java Modules - -include::example/depth/modules/8-diy-java-modules.adoc[] - -== Backticked Names - -include::example/depth/modules/9-backticked-names.adoc[] - - -== External Modules - -Libraries for use in Mill can define ``ExternalModule``s: ``Module``s which are -shared between all builds which use that library: - -[source,scala] ----- -package foo -import mill._ - -object Bar extends mill.define.ExternalModule { - def baz = T { 1 } - def qux() = T.command { println(baz() + 1) } - - lazy val millDiscover = mill.define.Discover[this.type] -} ----- - -In the above example, `Bar` is an `ExternalModule` living within the `foo` -Java package, containing the `baz` target and `qux` command. 
Those can be run -from the command line via: - -[source,bash] ----- -mill foo.Bar/baz -mill foo.Bar/qux ----- - -``ExternalModule``s are useful for someone providing a library for use with Mill -that is shared by the entire build: for example, -`mill.scalalib.ZincWorkerApi/zincWorker` provides a shared Scala compilation -service & cache that is shared between all ``ScalaModule``s, and -`mill.scalalib.GenIdea/idea` lets you generate IntelliJ projects without -needing to define your own `T.command` in your `build.mill` file diff --git a/docs/modules/ROOT/pages/Out_Dir.adoc b/docs/modules/ROOT/pages/Out_Dir.adoc deleted file mode 100644 index ec0903dc99e..00000000000 --- a/docs/modules/ROOT/pages/Out_Dir.adoc +++ /dev/null @@ -1,113 +0,0 @@ -= The Output Directory - -Mill puts all its output in the top-level `out/` folder. - -== Structure of the `out/` Directory - -The `out/` folder contains all the generated files & metadata for your build. -It holds some files needed to manage Mill's longer running server instances (`out/mill-worker-*`) as well as a directory and file structure resembling the project's module structure. 
- -.Example of the `out/` directory after running `mill main.compile` -[source,text] ----- -out/ -├── main/ <1> -│ ├── allScalacOptions.json -│ ├── allSourceFiles.json -│ ├── allSources.json -│ ├── compile.dest/ <2> -│ ├── compile.json -│ ├── compile.log <3> -│ ├── compileClasspath.json -│ ├── compileIvyDeps.json -│ ├── enablePluginScalacOptions.json -│ ├── generatedSources.json -│ ├── ivyDeps.json -│ ├── javacOptions.json -│ ├── mandatoryIvyDeps.json -│ ├── mandatoryIvyDeps.super/ <4> -│ ├── mandatoryScalacOptions.json -│ ├── platformSuffix.json -│ ├── resolvedIvyDeps.json -│ ├── resolvedIvyDeps.log <3> -│ ├── resources.json -│ ├── scalaCompilerClasspath.json -│ ├── scalaLibraryIvyDeps.json -│ ├── scalaOrganization.json -│ ├── scalaVersion.json -│ ├── scalacOptions.json -│ ├── scalacOptions.super/ <4> -│ ├── scalacPluginClasspath.json -│ ├── scalacPluginIvyDeps.json -│ ├── scalacPluginIvyDeps.super/ <4> -│ ├── sources.json -│ ├── transitiveCompileIvyDeps.json -│ ├── transitiveIvyDeps.json -│ ├── transitiveLocalClasspath.json -│ ├── unmanagedClasspath.json -│ └── upstreamCompileOutput.json -├── mill-profile.json -└── mill-worker-VpZubuAK6LQHHN+3ojh1LsTZqWY=-1/ ----- - -<1> The `main` directory contains all files associated with target and submodules of the `main` module. -<2> The `compile` target has tried to access its scratch space via `T.dest`. Here you will find the actual compile results. -<3> Two targets printed something out while they ran. You can find these outputs in the `*.log` files. -<4> Three targets are overridden but re-use the result of their `super`-targets in some way. You can find these result under the `*.super/` path. - -== Target Metadata and Cached Files - -Each named task (``Target`` or ``Command``) that is run has a representation in the `out/` directory structure. - -The _module_ structure is reflected in the directories, so that each module of your project has a uniquely associated subdirectory under the `out/` directory. 
- -Each _target_ is associated with one or multiple files and directories under its module directory. -The following files can be found for a target `foo`: - -`foo.json`:: - the cache-key and JSON-serialized return-value of the -`Target`/`Command`. -The return-value can also be retrieved via `mill show foo.compile`. -Binary blobs are typically not included in `foo.json`, and instead stored as separate binary files in `foo.dest/` which are then referenced -by `foo.json` via `PathRef` references. - -`foo.dest/`:: - optional, a path for the `Task` to use either as a scratch space, or to place generated files that are returned -using `PathRef` references. -A `Task` should only output files within its own given `foo.dest/` folder (available as `T.dest`) to avoid -conflicting with another `Task`, but can name files within `foo.dest/` arbitrarily. - -`foo.log`:: - optional, the `stdout`/`stderr` of the `Task`. This is also streamed to the console during evaluation. - -`foo.super/`:: - optional, holds target metadata for overridden targets, so whenever you use a `super.foo()` in your `foo` target, you will find the metadata of the inherited task(s) under this directory. - - -The `out/` folder is intentionally kept simple and user-readable. -If your build is not behaving as you would expect, -feel free to poke around the various -`foo.dest/` folders to see what files are being created, or the `foo.json` files to see what is being returned by a -particular task. -You can also simply delete folders within `out/` if you want to force portions of your project to be -rebuilt, e.g. by deleting the `+out/main/+` or `+out/main/compile.*+` folders, but we strongly encourage you to use the xref:Scala_Builtin_Commands.adoc#_clean[`clean` command] instead. - -[WARNING] --- -Cleaning some target state by manually deleting files under `out/` may be convenient, but you need to be careful to always delete the `foo.json` file whenever you delete a `foo.dest/` or `foo.super/`. 
Otherwise, you risk running into hard to diagnose issues later. - -Instead, you should always give the `clean` command a try before manually deleting some file under `out/`. --- -== Other files in the `out/` directory - -There are also top-level build-related files in the `out/` folder, prefixed as `mill-*`. - -`mill-profile.json`:: - Probably the most useful file for you. It logs the tasks run and time taken for the last Mill command you executed. -This is very useful if Mill is being unexpectedly slow, and you want to find out exactly what tasks are being run. - -`mill-chrome-profile.json`:: - This file is only written if you run Mill in parallel mode, e.g. `mill --jobs 4`. This file can be opened in Google Chrome with the built-in `tracing:` protocol even while Mill is still running, so you get a nice chart of what's going on in parallel. - -`mill-worker-*/`:: - Each Mill server instance needs to keep some temporary files in one of these directories. Deleting it will also terminate the associated server instance, if it is still running. diff --git a/docs/modules/ROOT/pages/Plugin_BSP.adoc b/docs/modules/ROOT/pages/Plugin_BSP.adoc deleted file mode 100644 index 2b1631be794..00000000000 --- a/docs/modules/ROOT/pages/Plugin_BSP.adoc +++ /dev/null @@ -1,6 +0,0 @@ -= BSP - Build Server Protocol - -_The contrib.bsp plugin is no longer available. -The BSP server is now integrated in Mill by default._ - -Read on in the xref:Scala_Installation_IDE_Support.adoc#_build_server_protocol_bsp[Build Server Protocol] section. diff --git a/docs/modules/ROOT/pages/Scala_Build_Examples.adoc b/docs/modules/ROOT/pages/Scala_Build_Examples.adoc deleted file mode 100644 index ba972d70809..00000000000 --- a/docs/modules/ROOT/pages/Scala_Build_Examples.adoc +++ /dev/null @@ -1,89 +0,0 @@ -= Scala Build Examples - -++++ - -++++ - -On this page, we will explore the Mill build tool via a series of simple Scala -example projects. 
Each project demonstrates one particular feature of the Mill -build tool, and is also an executable codebase you can download and run. By the -end of this page, you will be familiar with how to configure Mill to work with -realistic Scala codebases: cross-building, testing, and publishing them. - - -Many of the APIs covered here are listed in the Scaladoc: - -* {mill-doc-url}/api/latest/mill/scalalib/ScalaModule.html[`mill.scalalib.ScalaModule`] -* {mill-doc-url}/api/latest/mill/main/RootModule.html[`mill.scalalib.RootModule`] -* {mill-doc-url}/api/latest/mill/scalalib/TestModule$.html[`mill.scalalib.TestModule`] -* {mill-doc-url}/api/latest/mill/scalalib/PublishModule.html[`mill.scalalib.PublishModule`] -* {mill-doc-url}/api/latest/mill/scalalib/CrossScalaModule.html[`mill.scalalib.CrossScalaModule`] -* {mill-doc-url}/api/latest/mill/scalalib/SbtModule.html[`mill.scalalib.SbtModule`] -* {mill-doc-url}/api/latest/mill/scalalib/CrossSbtModule.html[`mill.scalalib.CrossSbtModule`] -* {mill-doc-url}/api/latest/mill/scalalib/JavaModule.html[`mill.scalalib.JavaModule`] - -== Common Configuration Overrides - -include::example/scalalib/builds/1-common-config.adoc[] - -== Custom Tasks - -include::example/scalalib/builds/2-custom-tasks.adoc[] - -== Overriding Tasks - -include::example/scalalib/builds/3-override-tasks.adoc[] - -== Nesting Modules - -include::example/scalalib/builds/4-nested-modules.adoc[] - -== Publish Module - -include::example/scalalib/builds/6-publish-module.adoc[] - -== Cross-Scala-Version Modules - -include::example/scalalib/builds/7-cross-scala-version.adoc[] - -== SBT-Compatible Modules - -include::example/scalalib/builds/8-compat-modules.adoc[] - - -== Realistic Scala Example Project - -include::example/scalalib/builds/9-realistic.adoc[] - - -== Example Builds for Real Projects - -Mill comes bundled with example builds for real-world open-source projects, -demonstrating how Mill can be used to build code outside of tiny example codebases: - -=== 
Acyclic - -include::example/thirdparty/acyclic.adoc[] - -=== Fansi - -include::example/thirdparty/fansi.adoc[] - -== Real World Mill Builds - -=== Ammonite - -https://github.com/com-lihaoyi/Ammonite[Ammonite] is an ergonomic Scala REPL. - -=== Scala-CLI - -https://github.com/VirtusLab/scala-cli[Scala-CLI] is the primary CLI tool that -runs when you enter `scala` in the terminal. It is able to compile, test, run, -and package your Scala code in a variety of different ways - -=== Coursier - -https://github.com/coursier/coursier[Coursier] is a fast JVM dependency resolver, -used in many build tools down resolve and download third party dependencies \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Scala_Builtin_Commands.adoc b/docs/modules/ROOT/pages/Scala_Builtin_Commands.adoc deleted file mode 100644 index dcacccbf1b1..00000000000 --- a/docs/modules/ROOT/pages/Scala_Builtin_Commands.adoc +++ /dev/null @@ -1,33 +0,0 @@ -= Built-in Commands - -:page-aliases: Scala_Builtin_Commands.adoc - -Mill comes with a number of useful commands out of the box. These are listed -in the Scaladoc: - -* {mill-doc-url}/api/latest/mill/main/MainModule.html[mill.main.MainModule] - -Mill's built-in commands are typically not directly related to building your -application code, but instead are utilities that help you understand and work -with your Mill build. - -include::example/scalalib/basic/4-builtin-commands.adoc[] - -== init - -[source,bash] ----- -> mill -i init com-lihaoyi/mill-scala-hello.g8 -.... -A minimal Scala project. - -name [Scala Seed Project]: hello - -Template applied in ./hello ----- - -The `init` command generates a project based on a Giter8 template. -It prompts you to enter project name and creates a folder with that name. -You can use it to quickly generate a starter project. -There are lots of templates out there for many frameworks and tools! 
- diff --git a/docs/modules/ROOT/pages/Scala_Installation_IDE_Support.adoc b/docs/modules/ROOT/pages/Scala_Installation_IDE_Support.adoc deleted file mode 100644 index 803f81d9dd8..00000000000 --- a/docs/modules/ROOT/pages/Scala_Installation_IDE_Support.adoc +++ /dev/null @@ -1,4 +0,0 @@ -= Installation and IDE Support -:page-aliases: Installation.adoc, IDE_Support.adoc - -include::partial$Installation_IDE_Support.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Scala_Intro_to_Mill.adoc b/docs/modules/ROOT/pages/Scala_Intro_to_Mill.adoc deleted file mode 100644 index 03428323f24..00000000000 --- a/docs/modules/ROOT/pages/Scala_Intro_to_Mill.adoc +++ /dev/null @@ -1,85 +0,0 @@ -// Author Notes: -// -// This is the first page a user is expected to land on when learning about -// Mill. It is designed to be a quick, broad overview to get someone started: -// what is Mill, why should they care, and what some simple Mill builds look -// like and how to use them. We intentionally touch shallowly on a lot of -// topics without giving them a proper discussion, since the other pages have -// plenty of space to go in-depth. -// -// By the end of this page, a prospective Mill user should be familiar with -// what Mill is, hopefully have downloaded an example to try out, and be -// interested in learning more about the Mill build tool - -= Introduction to Mill for Scala - -++++ - -++++ - -:page-aliases: index.adoc, Intro_to_Mill.adoc, Intro_to_Mill_for_Scala.adoc - -:language: Scala - -include::partial$Intro_to_Mill_Header.adoc[] - -Mill is used to build many mainstream Scala projects, such as the -https://github.com/coursier/coursier[Coursier dependency resolver], -https://github.com/VirtusLab/scala-cli[Scala-CLI], and the -https://github.com/com-lihaoyi/Ammonite[Ammonite REPL] - -Mill borrows ideas from other tools like https://maven.apache.org/[Maven], -https://gradle.org/[Gradle], https://bazel.build/[Bazel], or https://www.scala-sbt.org/[SBT]. 
-It tries to learn from the strengths of each tool, while improving on their weaknesses. - -Compared to SBT: - -* **Mill makes customizing the build yourself much easier**: most of what build tools - do work with files and call subprocesses, and Mill makes doing that yourself easy. - This means you can always make your Mill build do exactly what you want, and are not - beholden to third-party plugins that may not exist, be well maintained, or interact well - with each other. - -* **Mill is much more performant**: SBT has enough overhead that even a dozen - subprojects is enough to slow it down, while Mill can handle hundreds of modules without issue. - Custom tasks in SBT re-execute every time, whereas in Mill they are cached automatically. - Mill's watch-for-changes-and-re-run implementation has much lower latency than SBT's. The - list of ways Mill improves upon SBT's performance is long, and at the command line you - can really feel it - -* **Mill builds are much easier to understand**: Your Mill build is made of bog-standard - ``object``s and ``def``s, rather than SBT's - https://eed3si9n.com/4th-dimension-with-sbt-013/[four-dimensional task matrix]. Your IDE's - "*jump-to-definition*" in Mill actually brings you to the implementation of a task, rather - than an SBT `taskKey` declaration. Customizing things is as simple as writing or overriding - `def`s. The net effect is that despite both tools' build files being written in Scala, - Mill's build files are much easier to understand and maintain. - -For a more detailed dive into the problems with SBT or how Mill improves upon them, check -out the following blog posts: - -- https://www.lihaoyi.com/post/SowhatswrongwithSBT.html[So, what's wrong with SBT?] 
-- https://www.lihaoyi.com/post/MillBetterScalaBuilds.html[Mill: Better Scala Builds] - -include::partial$Intro_to_Mill_BlogVideo.adoc[] - -If you are using Mill, you will find the following book by the Author useful in -using Mill and its supporting libraries to the fullest: - -* https://handsonscala.com/[Hands-on Scala Programming] - -== Simple Scala Module - -include::example/scalalib/basic/1-simple.adoc[] - -== Custom Build Logic - -include::example/scalalib/basic/2-custom-build-logic.adoc[] - -== Multi-Module Project - -include::example/scalalib/basic/3-multi-module.adoc[] - -include::partial$Intro_to_Mill_Footer.adoc[] diff --git a/docs/modules/ROOT/pages/Scala_Module_Config.adoc b/docs/modules/ROOT/pages/Scala_Module_Config.adoc deleted file mode 100644 index 88c96e31d6a..00000000000 --- a/docs/modules/ROOT/pages/Scala_Module_Config.adoc +++ /dev/null @@ -1,186 +0,0 @@ -= Scala Module Configuration - -++++ - -++++ - -:page-aliases: Configuring_Mill.adoc - -This page goes into more detail about the various configuration options -for `ScalaModule`. 
- -Many of the APIs covered here are listed in the Scaladoc: - -* {mill-doc-url}/api/latest/mill/scalalib/ScalaModule.html[mill.scalalib.ScalaModule] - - -== Compilation & Execution Flags - -include::example/scalalib/module/1-compilation-execution-flags.adoc[] - -== Adding Ivy Dependencies - -include::example/scalalib/module/2-ivy-deps.adoc[] - -== Runtime and Compile-time Dependencies - -include::example/scalalib/module/3-run-compile-deps.adoc[] - -== Classpath and Filesystem Resources - -include::example/scalalib/module/5-resources.adoc[] - -== Scala Compiler Plugins - -include::example/scalalib/module/6-scala-compiler-plugins.adoc[] - -== Scaladoc Config - -include::example/scalalib/module/7-docjar.adoc[] - -== Unmanaged Jars - -include::example/scalalib/module/8-unmanaged-jars.adoc[] - -== Specifying the Main Class - -include::example/scalalib/module/9-main-class.adoc[] - -== Downloading Non-Maven Jars - -include::example/scalalib/module/10-downloading-non-maven-jars.adoc[] - -== Customizing the Assembly - -include::example/scalalib/module/11-assembly-config.adoc[] - -== Repository Config - -include::example/scalalib/module/12-repository-config.adoc[] - -== Maven Central: Blocked! - -Under some circumstances (e.g. corporate firewalls), you may not have access maven central. 
The typical symptom will be error messages which look like this:
- -To have a formatting per-module you need to make your module extend `mill.scalalib.scalafmt.ScalafmtModule`: - -.`build.mill` -[source,scala,subs="attributes,verbatim"] ----- -import mill._, scalalib._, scalafmt._ - -object foo extends ScalaModule with ScalafmtModule { - def scalaVersion = "{example-scala-2-13-version}" -} ----- - -Now you can reformat code with `mill foo.reformat` command, or only check for misformatted files with `mill foo.checkFormat`. - -You can also reformat your project's code globally with `+mill mill.scalalib.scalafmt.ScalafmtModule/reformatAll __.sources+` command, -or only check the code's format with `+mill mill.scalalib.scalafmt.ScalafmtModule/checkFormatAll __.sources+`. -It will reformat all sources that matches `+__.sources+` query. - -If you add a `.scalafmt.conf` file at the root of you project, it will be used -to configure formatting. It can contain a `version` key to specify the scalafmt -version used to format your code. See the -https://scalameta.org/scalafmt/docs/configuration.html[scalafmt configuration documentation] -for details. - - -== Using the Ammonite Repl / Scala console - -All ``ScalaModule``s have a `console` and a `repl` target, to start a Scala console or an Ammonite Repl. - -When using the `console`, you can configure its `scalac` options using the `consoleScalacOptions` target. - -For example, you may want to inherit all of your regular `scalacOptions` but disable `-Xfatal-warnings`: - -.Example: Using `consoleScalacOptions` to disable fatal warnings -[source,scala,subs="attributes,verbatim"] ----- -import mill._, scalalib._ - -object foo extends ScalaModule { - def consoleScalacOptions = scalacOptions().filterNot(o => o == "-Xfatal-warnings") -} ----- - -To use the `repl`, you can (and sometimes need to) customize the Ammonite version to work with your selected Scala version. 
-Mill provides a default Ammonite version, -but depending on the Scala version you are using, there may be no matching Ammonite release available. -In order to start the repl, you may have to specify a different available Ammonite version. - -.Example: Overriding `ammoniteVersion` to select a release compatible to the `scalaVersion` -[source,scala,subs="attributes,verbatim"] ----- -import mill._, scalalib._ - -object foo extends ScalaModule { - def scalaVersion = "2.12.6" - def ammoniteVersion = "2.4.0" -} ----- - -[TIP] --- -_Why is Ammonite tied to the exact Scala version?_ - -This is because Ammonite depends on the Scala compiler. -In contrast to the Scala library, compiler releases do not guarantee any binary compatibility between releases. -As a consequence, Ammonite needs full Scala version specific releases. - -The older your used Mill version or the newer the Scala version you want to use, the higher is the risk that the default Ammonite version will not match. --- - -== Disabling incremental compilation with Zinc - -By default all ``ScalaModule``s use incremental compilation via https://github.com/sbt/zinc[Zinc] to -only recompile sources that have changed since the last compile, or ones that have been invalidated -by changes to upstream sources. 
- -If for any reason you want to disable incremental compilation for a module, you can override and set -`zincIncrementalCompilation` to `false` - -.`build.mill` -[source,scala,subs="attributes,verbatim"] ----- -import mill._, scalalib._ - -object foo extends ScalaModule { - def zincIncrementalCompilation = false -} ----- diff --git a/docs/modules/ROOT/pages/Scala_Web_Examples.adoc b/docs/modules/ROOT/pages/Scala_Web_Examples.adoc deleted file mode 100644 index f329e53c3d8..00000000000 --- a/docs/modules/ROOT/pages/Scala_Web_Examples.adoc +++ /dev/null @@ -1,42 +0,0 @@ -= Scala Web Examples - -++++ - -++++ - -:page-aliases: Web_Build_Examples.adoc - -This page contains examples of using Mill as a build tool for web-applications. -It covers setting up a basic backend server, Todo-MVC app, topics like cache -busting, as well as usage of Scala.js both as standalone modules as well as -integrated with your backend Scala-JVM web server. - -== TodoMVC Web App - -include::example/scalalib/web/1-todo-webapp.adoc[] - -== Webapp Cache Busting - -include::example/scalalib/web/2-webapp-cache-busting.adoc[] - -== Scala.js Modules - -include::example/scalalib/web/3-scalajs-module.adoc[] - -== Scala.js Webserver Integration - -include::example/scalalib/web/4-webapp-scalajs.adoc[] - -== Scala.js/Scala-JVM Code Sharing - -include::example/scalalib/web/5-webapp-scalajs-shared.adoc[] - -== Publishing Cross-Platform Scala Modules - -include::example/scalalib/web/6-cross-version-platform-publishing.adoc[] - -== Publishing Cross-Platform Scala Modules Alternative - -include::example/scalalib/web/7-cross-platform-version-publishing.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Structuring_Large_Builds.adoc b/docs/modules/ROOT/pages/Structuring_Large_Builds.adoc deleted file mode 100644 index a38af1c6f09..00000000000 --- a/docs/modules/ROOT/pages/Structuring_Large_Builds.adoc +++ /dev/null @@ -1,15 +0,0 @@ -= Structuring Large Builds - -== Multi-file Builds - 
-include::example/depth/large/10-multi-file-builds.adoc[] - -== Helper Files - -include::example/depth/large/11-helper-files.adoc[] - -== Legacy `.sc` extension - -include::example/depth/large/12-helper-files-sc.adoc[] - - diff --git a/docs/modules/ROOT/pages/Target_Query_Syntax.adoc b/docs/modules/ROOT/pages/Target_Query_Syntax.adoc deleted file mode 100644 index 100e20e65f3..00000000000 --- a/docs/modules/ROOT/pages/Target_Query_Syntax.adoc +++ /dev/null @@ -1,148 +0,0 @@ -= Target Query Syntax - -When interacting with Mill from the CLI, you often need to select targets or modules. -In most places, where Mill accepts a target, it really accepts a target selector query, which is the name of a target in its simplest form, but it can also contain wildcards, type patterns and other special syntax, making it a powerful tool to select specific targets. - -== Selecting dedicated targets - -When invoking Mill, the simplest way to run a target is to give its fully qualified name. - -Examples: - ----- -> mill foo.compile -> mill foo.run hello world -> mill foo.testCached ----- - -.Understanding target paths and path segments -**** - -Each Mill module and target has a unique path. -Each part of the path is called a _segment_. -Segments are separated with a dot (`.`). -They look like regular Scala class name qualifiers. - -There are two kinds of segments: _label segments_ and _cross segments_. - -_Label segments_ are the components of a target path and have the same restriction as Scala identifiers. -They must start with a letter and may contain letters, numbers and a limited set of special characters `-` (dash), `_` (underscore). -They are used to denote Mill modules, tasks, but in the case of xref:Modules.adoc#external-modules[external modules] their Scala package names. - -_Cross segments_ start with a label segment but contain additional square brackets (`[`, `]`) and are used to denote cross modules and their parameters. 
- -NOTE: Segments can be surrounded by parentheses (`(`, `)`). -When combined with <> which contain dots (`.`), the parentheses need to be used, to avoid the dots being interpreted as path separators. - -**** - -[#select-multiple-targets] -== Selecting multiple targets - -If you want to select more than one target, you have multiple options: - -* <> -* <> -* <> -* <> - -You can also combine these techniques to properly select your targets - -[#enumerations] -== Enumerations - -Enumerations are denoted by curly braces (`{`, `}`). -Inside the curly braces you can place two or more selector paths, separated with a comma (`,`). - -Examples: - -* `{foo,bar}` simply enumerates two targets, `foo` and `bar` -* `foo.{compile,run}` expands to `foo.compile` and `foo.run` -* `+{_,foo.bar}.baz+` expands to `+_.baz+` and `foo.bar.baz` - -[TIP] -==== -Some Shells like `bash` support curly brace expansion. -Make sure to properly mask the selector path, e.g. by putting it in quotes. - -[bash] ----- -mill "foo.{compile,run}" ----- -==== - -[#wildcards] -== Wildcard selections - -There are two wildcards you can use as a path segment. - -* `+_+` The single underscore acts as a placeholder for a single segment. - -* `+__+` The double underscore acts as a placeholder for many segments. -In particular, it can represent an empty segment. - -With wildcards, you can get explicit control over the position of a target in the build tree. - -E.g. the filter `+_._._.jar+` will match all jar targets, that are on the third-level of the build tree. - -[#type-filters] -== Type filters for wildcard selections - -Type filters are always combined with a wildcard. -They are used to limit the scope of the wildcard to only match path segments of the specified types. -For module paths this means, the represented module needs to be an instance of the specified type. - -A type filter always starts with a wildcard (`+_+`, `+__+`) followed by a colon (`:`) and finally the _type qualifier_. 
- -The type is matched by its name and optionally by its enclosing types and packages, separated by a `.` sign. -Since this is also used to separate target path segments, a type selector segment containing a `.` needs to be enclosed in parentheses. -A fully qualified type can be denoted with the `+_root_+` package. - -[sh] ----- -> mill resolve __:TestModule.jar -> mill resolve "(__:scalalib.TestModule).jar" -> mill resolve "(__:mill.scalalib.TestModule).jar" -> mill resolve "(__:_root_.mill.scalalib.TestModule).jar" ----- - -If the type qualifier starts with a `^` or `!`, it's only matching types which are _not_ instances of the specified type. - -[sh] ----- -> mill resolve __:^TestModule.jar ----- - -You can also add more than one type filter to a wildcard. - -[sh] ----- -> mill resolve "__:JavaModule:^ScalaModule:^TestModule.jar" ----- - -NOTE: Type filters are currently only supported for module selections, but not for target selections. -That means, you can't filter based on the result type of a target. - -[#add-target-selector] -== Start a new target selector with `+` - -On the Mill CLI you can also start a completely new target selector with the `+` sign. - -There is a subtle difference between the expansion of <>, <> and <> in contrast to the <>. - -For all the former versions, Mill parses them into a complex but single target selector path and subsequent parameters are used for all resolved targets. - -Whereas the `+` starts a completely new selector path to which you can also provide a different parameter list. This is important when using xref:Tasks.adoc#commands[command targets] which can accept their own parameters. The `JavaModule.run` command is an example. - ----- -> mill foo.run hello # <1> -> mill {foo,bar}.run hello # <2> -> mill __:JavaModule:^TestModule.run hello # <3> -> mill foo.run hello + bar.run world # <4> ----- - -<1> Runs `foo.run` with the parameter `hello` -<2> Expands to `foo.run` and `bar.run` and runs both with the parameter `hello`. 
-<3> Selects the `run` command of all Java modules, but not test moudles, and runs them with the parameter `hello`. -<4> Runs `fun.run` with the parameter `hello` and `bar.run` with the parameter `world`. - diff --git a/docs/modules/ROOT/pages/Tasks.adoc b/docs/modules/ROOT/pages/Tasks.adoc deleted file mode 100644 index d6c8836f039..00000000000 --- a/docs/modules/ROOT/pages/Tasks.adoc +++ /dev/null @@ -1,56 +0,0 @@ -= Tasks - -One of Mill's core abstractions is its _Task Graph_: this is how Mill defines, -orders and caches work it needs to do, and exists independently of any support -for building Scala. - -Mill target graphs are primarily built using methods and macros defined on -`mill.define.Target`, aliased as `T` for conciseness: - -- {mill-doc-url}/api/latest/mill/define/Target$.html[mill.define.Target] - -== Task Cheat Sheet - -The following table might help you make sense of the small collection of -different Task types: - -[cols="<,<,<,<,<,<,<"] -|=== -| |Target |Command |Source/Input |Anonymous Task |Persistent Target |Worker - -|Cached to Disk |X | | | |X | -|JSON Writable |X |X |X| |X | -|JSON Readable |X | | | |X | -|CLI Runnable |X |X | | |X | -|Takes Arguments | |X | |X | | -|Cached In-Memory | | | | | |X -|=== - -include::example/depth/tasks/1-task-graph.adoc[] - -[#primitive-tasks] -== Primary Tasks - -include::example/depth/tasks/2-primary-tasks.adoc[] - -== Other Tasks - -=== Anonymous Tasks - -include::example/depth/tasks/3-anonymous-tasks.adoc[] - -=== Inputs - -include::example/depth/tasks/4-inputs.adoc[] - -=== Persistent Targets - -include::example/depth/tasks/5-persistent-targets.adoc[] - -=== Workers -include::example/depth/tasks/6-workers.adoc[] - - -== Using ScalaModule.run as a task - -include::example/depth/tasks/11-module-run-task.adoc[] diff --git a/docs/modules/ROOT/pages/Testing_Java_Projects.adoc b/docs/modules/ROOT/pages/Testing_Java_Projects.adoc deleted file mode 100644 index 7ccf2cb4abc..00000000000 --- 
a/docs/modules/ROOT/pages/Testing_Java_Projects.adoc +++ /dev/null @@ -1,22 +0,0 @@ -= Testing Java Projects - -++++ - -++++ - -This page will discuss common topics around working with test suites using the Mill build tool - -== Defining Unit Test Suites - -include::example/javalib/testing/1-test-suite.adoc[] - - -== Test Dependencies - -include::example/javalib/testing/2-test-deps.adoc[] - -== Defining Integration Test Suites - -include::example/javalib/testing/3-integration-suite.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Testing_Scala_Projects.adoc b/docs/modules/ROOT/pages/Testing_Scala_Projects.adoc deleted file mode 100644 index 23736381791..00000000000 --- a/docs/modules/ROOT/pages/Testing_Scala_Projects.adoc +++ /dev/null @@ -1,22 +0,0 @@ -= Testing Scala Projects - -++++ - -++++ - -This page will discuss common topics around working with test suites using the Mill build tool - -== Defining Unit Test Suites - -include::example/scalalib/testing/1-test-suite.adoc[] - - -== Test Dependencies - -include::example/scalalib/testing/2-test-deps.adoc[] - -== Defining Integration Test Suites - -include::example/scalalib/testing/3-integration-suite.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/The_Mill_Evaluation_Model.adoc b/docs/modules/ROOT/pages/The_Mill_Evaluation_Model.adoc deleted file mode 100644 index 3c494898925..00000000000 --- a/docs/modules/ROOT/pages/The_Mill_Evaluation_Model.adoc +++ /dev/null @@ -1,144 +0,0 @@ -= The Mill Evaluation Model - -Evaluating a Mill target typically goes through the following phases: - -1. *Compilation*: Mill compiles the `build.mill` to classfiles, following the -<<_the_mill_bootstrapping_process>> to eventually produce a `RootModule` object - -2. *Resolution*: Mill resolves the list of xref:Tasks.adoc[] given from the command line, - e.g. 
`resolve _` or `foo.compile` or `{bar,qux}.__.test`, to a list of - concrete `Task` objects nested on xref:Modules.adoc[] within the `RootModule` along - with their transitive dependencies - - * In the process, the relevant Mill ``Module``s are lazily instantiated - -3. *Evaluation*: Mill evaluates the gathered ``Task``s in dependency-order, - either serially or in parallel - -== Limitations of the Mill Evaluation Model - -This three-phase evaluation model has consequences for how you structure your -build. For example: - -1. You can have arbitrary code outside of ``Task``s that helps - set up your task graph and module hierarchy, e.g. computing what keys exist - in a `Cross` module, or specifying your `def moduleDeps` - -2. You can have arbitrary code inside of ``Task``s, to perform your build - actions - -3. *But* your code inside of ``Task``s cannot influence the shape of the task - graph or module hierarchy, as all *Resolving* and *Planning* happens first - *before* any ``Task``s are evaluated. - -This should not be a problem for most builds, but it is something to be aware -of. In general, we have found that having "two places" to put code - outside of -``Task``s to run during *Planning* or inside of ``Task``s to run during -*Evaluation* - is generally enough flexibility for most use cases. - -The hard boundary between these two phases is what lets users easily query -and visualize their module hierarchy and task graph without running them: using -xref:Scala_Builtin_Commands.adoc#inspect[inspect], xref:Scala_Builtin_Commands.adoc#plan[plan], -xref:Scala_Builtin_Commands.adoc#_visualize[visualize], etc.. This helps keep your -Mill build discoverable even as the `build.mill` codebase grows. - -== Caching at Each Layer of the Evaluation Model - -Apart from fine-grained caching of ``Task``s during *Evaluation*, Mill also -performs incremental evaluation of the other phases. This helps ensure -the overall workflow remains fast even for large projects: - -1. 
*Compilation*: - - * Done on-demand and incrementally using the Scala - incremental compiler https://github.com/sbt/zinc[Zinc]. - - * If some of the files `build.mill` imported changed but not others, only the - changed files are re-compiled before the `RootModule` is re-instantiated - - * In the common case where `build.mill` was not changed at all, this step is - skipped entirely and the `RootModule` object simply re-used from the last - run. - -2. *Planning*: - - * If the `RootModule` was re-used, then all - previously-instantiated modules are simply-re-used - -3. *Evaluation*: - - * ``Task``s are evaluated in dependency order - - * xref:Tasks.adoc#_targets[Target]s only re-evaluate if their input ``Task``s - change. - - * xref:Tasks.adoc#_persistent_targets[T.persistent]s preserve the `T.dest` folder on disk between runs, - allowing for finer-grained caching than Mill's default target-by-target - caching and invalidation - - * xref:Tasks.adoc#_workers[T.worker]s are kept in-memory between runs where possible, and only - invalidated if their input ``Task``s change as well. - - * ``Task``s in general are invalidated if the code they depend on changes, - at a method-level granularity via callgraph reachability analysis. See - https://github.com/com-lihaoyi/mill/pull/2417[#2417] for more details - -This approach to caching does assume a certain programming style inside your -Mill build: we may-or-may-not re-instantiate the modules in your -`build.mill` and we may-or-may-not re-execute any particular task depending on caching, -but your code needs to work either way. Furthermore, task ``def``s and module `object`s in your -build are instantiated lazily on-demand, and your code needs to work regardless -of which order they are executed in. For code written in a typical Scala style, -which tends to avoid side effects, this is not a problem at all. 
- -One thing to note is for code that runs during *Resolution*: any reading of -external mutable state needs to be wrapped in an `interp.watchValue{...}` -wrapper. This ensures that Mill knows where these external reads are, so that -it can check if their value changed and if so re-instantiate `RootModule` with -the new value. - -== The Mill Bootstrapping Process - -Mill's bootstrapping proceeds roughly in the following phases: - -1. If using the bootstrap script, it first checks if the right version of Mill -is already present, and if not it downloads it to `~/.mill/download` - -2. It instantiates an in-memory `MillBuildRootModule.BootstrapModule`, -which is a hard-coded `build.mill` used for bootstrapping Mill - -3. If there is a meta-build present `mill-build/build.mill`, it processes that -first and uses the `MillBuildRootModule` returned for the next steps. -Otherwise it uses the `MillBuildRootModule.BootstrapModule` directly - -4. Mill evaluates the `MillBuildRootModule` to parse the `build.mill`, generate -a list of `ivyDeps` as well as appropriately wrapped Scala code that we can -compile, and compiles it to classfiles - -5. Mill loads the compiled classfiles of the `build.mill` into a -`java.lang.ClassLoader` to access its `RootModule` - -Everything earlier in the doc applies to each level of meta-builds in the -Mill bootstrapping process as well. - -In general, `.sc` files, `import $file`, and `import $ivy` can be thought of as -a short-hand for configuring the meta-build living in `mill-build/build.mill`: - -1. `.sc` and `import $file` are a shorthand for specifying the `.scala` files - living in `mill-build/src/` - -2. `import $ivy` is a short-hand for configuring the `def ivyDeps` in - `mill-build/build.mill` - -Most builds would not need the flexibility of a meta-build's -`mill-build/build.mill`, but it is there if necessary. 
- -Mill supports multiple levels of meta-builds for bootstrapping: - -- Just `build.mill` -- One level of meta-builds: `mill-build/build.mill` and `build.mill` -- Two levels of meta-builds: `mill-build/mill-build/build.mill`, - `mill-build/build.mill` and `build.mill` - -xref:The_Mill_Meta_Build.adoc[The Mill Meta Build] works through a simple use case -and example for meta-builds. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/The_Mill_Meta_Build.adoc b/docs/modules/ROOT/pages/The_Mill_Meta_Build.adoc deleted file mode 100644 index cba1c33522e..00000000000 --- a/docs/modules/ROOT/pages/The_Mill_Meta_Build.adoc +++ /dev/null @@ -1,48 +0,0 @@ - -= The Mill Meta-Build - -The meta-build manages the compilation of the `build.mill`. -If you don't configure it explicitly, a built-in synthetic meta-build is used. - -To customize it, you need to explicitly enable it with `import $meta._`. -Once enabled, the meta-build lives in the `mill-build/` directory. -It needs to contain a top-level module of type `MillBuildRootModule`. - -Meta-builds are recursive, which means a meta-build can itself have a nested meta-build, and so on. - -To run a task on a meta-build, you specify the `--meta-level` option to select the meta-build level. - -== Autoformatting the `build.mill` - -As an example of running a task on the meta-build, you can format the `build.mill` with Scalafmt. -Everything is already provided by Mill. -You only need a `.scalafmt.conf` config file which at least needs to configure the Scalafmt version. - -.Run Scalafmt on the `build.mill` (and potentially included files) ----- -$ mill --meta-level 1 mill.scalalib.scalafmt.ScalafmtModule/reformatAll sources ----- - -* `--meta-level 1` selects the first meta-build. Without any customization, this is the only built-in meta-build. -* `mill.scalalib.scalafmt.ScalafmtModule/reformatAll` is a generic task to format scala source files with Scalafmt. 
It requires the targets that refer to the source files as argument -* `sources` this selects the `sources` targets of the meta-build, which at least contains the `build.mill`. - -== Finding plugin updates - -Mill plugins are defined as `ivyDeps` in the meta-build. -Hence, you can easily search for updates with the external `mill.scalalib.Dependency` module. - -.Check for Mill Plugin updates ----- -$ mill --meta-level 1 mill.scalalib.Dependency/showUpdates -Found 1 dependency update for -de.tototec:de.tobiasroeser.mill.vcs.version_mill0.11_2.13 : 0.3.1-> 0.4.0 ----- - -== Sharing Libraries between `build.mill` and Application Code - -include::example/extending/metabuild/4-meta-build.adoc[] - -== Sharing Source Code between `build.mill` and Application Code - -include::example/extending/metabuild/5-meta-shared-sources.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/Thirdparty_Plugins.adoc b/docs/modules/ROOT/pages/Thirdparty_Plugins.adoc deleted file mode 100644 index 0a76efa3789..00000000000 --- a/docs/modules/ROOT/pages/Thirdparty_Plugins.adoc +++ /dev/null @@ -1,1100 +0,0 @@ -= Third-Party Plugins -:page-aliases: Thirdparty_Modules.adoc - -The Plugins in this section are developed/maintained outside the mill git tree. -This list is most likely not complete. -If you wrote a Mill plugin or find that one is missing in this list, please open a {mill-github-url}/pulls[pull request] and add that plugin with a short description (in alphabetical order). - -For details about including plugins in your `build.mill` read xref:Using_Plugins.adoc[Using Mill Plugins]. - -CAUTION: Besides the documentation provided here, we urge you to consult the respective linked plugin documentation pages. -The usage examples given here are most probably incomplete and sometimes outdated! - -Additional to this list, you can also search the https://github.com/topics/mill-plugin[`mill-plugin` topic on GitHub] for more plugins. 
- - -== Aliases - -This plugin adds an opinionated way of defining aliases to Mill builds. Added aliases are global and applied to the whole build. - -Project home: https://github.com/carlosedp/mill-aliases - -[source,scala] ----- -import mill._, scalalib._ -import $ivy.`com.carlosedp::mill-aliases::0.2.1` -import com.carlosedp.aliases._ - -object foo extends ScalaModule { - ... -} - -object MyAliases extends Aliases { - def testall = alias("__.test") - def compileall = alias("__.compile") - def comptestall = alias("__.compile", "__.test") -} ----- - -To show all the defined aliases: - -```sh -./mill Alias/list -``` - -Run an alias: - -```sh -./mill Alias/run testall -``` - -When run, each aliased task is checked if valid. - -== Antlr - -https://www.antlr.org/[ANTLR parser generator] support for mill. - -Project home: https://github.com/ml86/mill-antlr - - -[source,scala] ----- -import $ivy.`net.mlbox::mill-antlr:0.1.0` -import net.mlbox.millantlr.AntlrModule - -object foo extends ScalaModule with AntlrModule { - override def antlrGrammarSources = T.sources { - Seq(os.pwd/"someGrammar.g4").map(PathRef(_)) - } -} ----- - -== AspectJ - -https://projects.eclipse.org/projects/tools.aspectj[AspectJ compiler] support for mill. - -Project home: https://github.com/lefou/mill-aspectj - - -[source,scala] ----- -import mill._ -import mill.scalalib._ -import mill.define._ - -// Load the plugin from Maven Central via ivy/coursier -import $ivy.`de.tototec::de.tobiasroeser.mill.aspectj_mill0.9:0.3.1-12-89db01 -import de.tobiasroeser.mill.aspectj._ - -object main extends AspectjModule { - - // Select the AspectJ version - def aspectjVersion = "1.9.5" - - // Set AspectJ options, e.g. the language level and annotation processor - // Run `mill main.ajcHelp` to get a list of supported options - def ajcOptions = Seq("-8", "-proc:none") - -} ----- - -For documentation, please refer to the https://github.com/lefou/mill-aspectj[project home page]. 
- -== Bash Completion - -Limited bash completion support. - -Project home: https://github.com/lefou/mill-bash-completion - -== Bundler - -`mill-bundler` is comparable to `scalajs-bundler` for SBT: It manages NPM dependencies for a Scala.js module and -bundling it. Currently Webpack and Rollup are implemented but it's easy to use another one. - -Project home: https://github.com/nafg/mill-bundler - - -== CI Release - -`mill-ci-release` is a wrapper around the existing publish functionality of -Mill with the aim to making releasing your project in GitHub Actions to Maven -easier by automating common setup such as setting up gpg in CI, setting up -versioning, and ensuring merges to into your main branch get published as a -SNAPSHOT. If you're coming from sbt, then you're likely familiar with -https://github.com/sbt/sbt-ci-release[`sbt-ci-release`] which this plugin -imitates. - -Project home: https://github.com/ckipp01/mill-ci-release - - -To get started, you'll want to use `CiReleaseModule` as a drop in replacement -where you'd normally use the Mill `PublishModule` and then ensure you implement -everything that `PublishModule` requires. - -Secondly, you'll need to ensure you have a few environment variables correctly -set in your GitHub repo. You can see detailed instuctions on which are -necessary https://github.com/ckipp01/mill-ci-release#secrets[here]. - -Then in CI to publish you'll simply issue a single command: - -[source,yaml] ----- -- run: mill -i io.kipp.mill.ci.release.ReleaseModule/publishAll ----- - -This will automatically grab all the artifacts that you've defined to publish -in your build and publish them. Your version will automatically be managed by -https://github.com/lefou/mill-vcs-version[`mill-vcs-version`] and if your -version ends in `-SNAPSHOT` you're project will be published to Sonatype -Snapshots or to the normal releases if it's a new tag. 
- -== Daemon - -Use mill as a launcher for self-building systemd daemons, -convenient for handling of code-as-config, or quick editing and rebuilding -of code-generating templates. - -Project home: https://github.com/swaldman/mill-daemon - -Place the millw script from https://github.com/lefou/millw in your project directory. - -./opt/coolproj/build.mill ----- -import $ivy.`com.mchange::mill-daemon:0.0.1` -import com.mchange.milldaemon.DaemonModule - -object coolproj extends RootModule with DaemonModule { - override def runDaemonPidFile = Some( os.pwd / "coolproj.pid" ) -} ----- - -./opt/coolproj/rebuild-and-start ----- -#!/bin.bash - -./millw runMainDaemon coolproj.Main "$@" ----- - -./opt/coolproj/coolproj.service ----- -[Unit] -Description=Cool Project -After=syslog.target network.target - -[Service] -Type=forking -PIDFile=/opt/coolproj/coolproj.pid -User=coolproj -Group=coolproj -WorkingDirectory=/opt/coolproj -ExecStart=/opt/coolproj/rebuild-and-start -Restart=on-failure - -[Install] -WantedBy=multi-user.target ----- - -Symlink `/opt/coolproj/coolproj.service` from `/etc/systemd/system`, then `systemctl restart coolproj`. - -== DGraph - -Show transitive dependencies of your build in your browser. - -Project home: https://github.com/ajrnz/mill-dgraph - - -[source,scala] ----- -import $ivy.`com.github.ajrnz::mill-dgraph:0.2.0` ----- - -[source,sh] ----- -sh> mill plugin.dgraph.browseDeps(proj)() ----- - -== Docker Native-Image Packager - -This plugin allows building Docker container images with GraalVM Native-Image -binaries for cloud-native and fast-startup applications. - -Project home: https://github.com/carlosedp/mill-docker-nativeimage - -Import the plugin, extend your module with `DockerNative` and configure the -parameters for your application using the `DockerNativeConfig` trait in the -`dockerNative` object. 
- -[source,scala] ----- -import mill._, mill.scalalib._, mill.scalalib.scalafmt._ -import $ivy.`com.carlosedp::mill-docker-nativeimage::0.6.0` -import com.carlosedp.milldockernative.DockerNative - -object hello extends ScalaModule with DockerNative { - def scalaVersion = "3.3.0" - object dockerNative extends DockerNativeConfig { - // Native Image parameters - def nativeImageName = "hello" - def nativeImageGraalVmJvmId = T("graalvm-java17:22.3.2") - def nativeImageClassPath = runClasspath() - def nativeImageMainClass = "com.domain.Hello.Hello" - // GraalVM parameters depending on your application needs - def nativeImageOptions = Seq( - "--no-fallback", - "--enable-url-protocols=http,https", - "-Djdk.http.auth.tunneling.disabledSchemes=", - ) ++ (if (sys.props.get("os.name").contains("Linux")) Seq("--static") else Seq.empty) - - // Generated Docker image parameters - def baseImage = "ubuntu:22.04" - def tags = List("docker.io/myuser/helloapp") - def exposedPorts = Seq(8080) - } -} ----- - -[source,sh] ----- -./mill hello.dockerNative.build() -# Test run -docker run -it --rm docker.io/myuser/helloapp - -# Push to a registry -./mill hello.dockerNative.push ----- - -For more details and configuration options, please refer to the project readme -and also check the provided example code. - -== Docusaurus 2 - -Simple Docusaurus runner for Mill - -The plugin provides a mill module that allows to build the project web site using https://docusaurus.io/[Docusaurus 2] as a static content generator. - -Project home. https://github.com/atooni/mill-docusaurus2 - - -== Ensime - -Create an http://ensime.github.io/[.ensime] file for your build. 
- -Project home: https://github.com/davoclavo/mill-ensime - - -[source,scala] ----- -import mill._ -interp.repositories() = - interp.repositories() ++ Seq(coursier.MavenRepository("https://jitpack.io")) - -@ - -import $ivy.`com.github.yyadavalli::mill-ensime:0.0.2` ----- - -You can then run the following to generate the .ensime file - -[source,sh] ----- -mill fun.valycorp.mill.GenEnsime/ensimeConfig ----- - -Optionally, you can specify the ensime server version using the –server flag like - -[source,sh] ----- -mill fun.valycorp.mill.GenEnsime/ensimeConfig --server "3.0.0-SNAPSHOT" ----- - -== Explicit Deps - -A plugin that checks that `ivyDeps` and `ivyCompileDeps` accurately reflect the direct dependencies of your source code. - -Project home: https://github.com/kierendavies/mill-explicit-deps - - -.`build.mill` -[source,scala] ----- -import $ivy.`io.github.kierendavies::mill-explicit-deps::0.1.0` -import io.github.kierendavies.mill.explicitdeps.ExplicitDepsModule - -object foo extends ScalaModule with ExplicitDepsModule { - // ... -} ----- - -[source,shell script] ----- -> mill foo.checkExplicitDeps -[37/37] main.checkExplicitDeps -Found undeclared dependencies: (add these to ivyDeps) - ivy"org.typelevel::cats-kernel:2.7.0", - -Found unimported dependencies: (remove these from ivyDeps) - ivy"org.typelevel::cats-effect:3.3.6", - -1 targets failed -main.checkExplicitDeps Found 1 undeclared dependencies, 1 unimported dependencies ----- - -== Fish Completion - -Limited fish completion support. - -Project home: https://github.com/ckipp01/mill-fish-completions - -== Giter8 - -A plugin to test the generation of your -http://www.foundweekends.org/giter8/index.html[Giter8] template and expected -working targets for your template after generation. 
- -Project home: https://github.com/ckipp01/mill-giter8 - - -.`build.mill` -[source,scala] ----- -import $ivy.`io.chris-kipp::mill-giter8::0.2.0` - -import io.kipp.mill.giter8.G8Module - -object g8 extends G8Module { - override def validationTargets = - Seq("example.compile", "example.fix", "example.reformat") -} ----- - -The most common target you'd then use is `mill g8.validate`. - -== Git - -A git version plugin for mill. - -Project home: https://github.com/joan38/mill-git - -_build.sc_: - -[source,scala] ----- -import $ivy.`com.goyeau::mill-git:` -import com.goyeau.mill.git.GitVersionedPublishModule -import mill.scalalib.JavaModule -import mill.scalalib.publish.{Developer, License, PomSettings, VersionControl} - -object `jvm-project` extends JavaModule with GitVersionedPublishModule { - override def pomSettings = PomSettings( - description = "JVM Project", - organization = "com.goyeau", - url = "https://github.com/joan38/mill-git", - licenses = Seq(License.MIT), - versionControl = VersionControl.github("joan38", "mill-git"), - developers = Seq(Developer("joan38", "Joan Goyeau", "https://github.com/joan38")) - ) -} ----- - -[source,shell script] ----- -> mill show jvm-project.publishVersion -[1/1] show -[2/2] com.goyeau.mill.git.GitVersionModule.version -"0.0.0-470-6d0b3d9" ----- - -== GitHub Dependency Graph Submission - -A plugin to submit your mill dependency graph to GiHub through their -https://github.blog/2022-06-17-creating-comprehensive-dependency-graph-build-time-detection/[Dependency -Submission API]. - -Project home: https://github.com/ckipp01/mill-github-dependency-graph - - -The easiest way to use this plugin is with the -https://github.com/ckipp01/mill-github-dependency-graph[mill-dependency-submission] -action. 
You can add it as a workflow: - -[source,yaml] ----- -name: github-dependency-graph - -on: - push: - branches: - - main - -jobs: - submit-dependency-graph: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: coursier/cache-action@v6 - - uses: actions/setup-java@v3 - with: - distribution: 'temurin' - java-version: '17' - - uses: ckipp01/mill-dependency-submission@v1 ----- - - -== Hepek - -`mill-hepek` is a plugin for writing Scala `object`s to files. - -It is used as a core for Hepek static site generator: https://sake92.github.io/hepek/hepek/index.html . - -Project home: https://github.com/sake92/mill-hepek - - - -== Integration Testing Mill Plugins - -Integration testing for mill plugins. - -Project home: https://github.com/lefou/mill-integrationtest - - -We assume, you have a mill plugin named `mill-demo` - -[source,scala] ----- -// build.mill -import mill._, mill.scalalib._ -object demo extends ScalaModule with PublishModule { - // ... -} ----- - -Add a new test sub-project, e.g. `itest`. - -[source,scala] ----- -// build.sc -import $ivy.`de.tototec::de.tobiasroeser.mill.integrationtest_mill0.9:0.4.0` -import de.tobiasroeser.mill.integrationtest._ - -object demo extends ScalaModule with PublishModule { - // ... -} - -object itest extends MillIntegrationTestModule { - - def millTestVersion = "0.9.3" - - def pluginsUnderTest = Seq(demo) - -} ----- - -Your project should now look similar to this: - -[source,text] ----- -. -+-- demo/ -| +-- src/ -| -+-- it/ - +-- src/ - +-- 01-first-test/ - | +-- build.sc - | +-- src/ - | - +-- 02-second-test/ - +-- build.sc ----- - -As the buildfiles `build.sc` in your test cases typically want to access the locally built plugin(s), -the plugins publishes all plugins referenced under `pluginsUnderTest` to a temporary ivy repository, just before the test is executed. -The mill version used in the integration test then used that temporary ivy repository. 
- -Instead of referring to your plugin with `import $ivy.'your::plugin:version'`, -you can use the following line instead, which ensures you will use the correct locally build plugins. - -[source,scala] ----- -// build.sc -import $exec.plugins ----- - -Effectively, at execution time, this line gets replaced by the content of `plugins.sc`, a file which was generated just before the test started to execute. - -Please always refer to the https://github.com/lefou/mill-integrationtest[official plugin documentation site] for complete and up-to-date information. - -== JaCoCo - Code Coverage - -Mill plugin to collect test coverage data with https://www.jacoco.org/jacoco/[JaCoCo] and generate reports. - -Plugin home: https://github.com/lefou/mill-jacoco - - - -== JBake - -Create static sites/blogs with JBake. - -Plugin home: https://github.com/lefou/mill-jbake - -JBake home: https://jbake.org - - -[source,scala] ----- -// build.sc -import mill._ -import $ivy.`de.tototec::de.tobiasroeser.mill.jbake:0.1.0` -import de.tobiasroeser.mill.jbake._ - -object site extends JBakeModule { - - def jbakeVersion = "2.6.4" - -} ----- - -Generate the site: - -[source,sh] ----- -bash> mill site.jbake ----- - -Start a local Web-Server on Port 8820 with the generated site: - -[source,sh] ----- -bash> mill site.jbakeServe ----- - -== JBuildInfo - -This is a https://www.lihaoyi.com/mill/[mill] module similar to -https://www.lihaoyi.com/mill/page/contrib-modules.html#buildinfo[BuildInfo] -but for Java. -It will generate a Java class containing information from your build. 
- -Project home: https://github.com/carueda/mill-jbuildinfo - -To declare a module that uses this plugin, extend the -`com.github.carueda.mill.JBuildInfo` trait and provide -the desired information via the `buildInfoMembers` method: - -[source,scala] ----- -// build.sc -import $ivy.`com.github.carueda::jbuildinfo:0.1.2` -import com.github.carueda.mill.JBuildInfo -import mill.T - -object project extends JBuildInfo { - def buildInfoMembers: T[Map[String, String]] = T { - Map( - "name" -> "some name", - "version" -> "x.y.z" - ) - } -} ----- - -This will generate: - -[source,java] ----- -// BuildInfo.java -public class BuildInfo { - public static final String getName() { return "some name"; } - public static final String getVersion() { return "x.y.z"; } -} ----- - - -* `def buildInfoMembers: T[Map[String, String]]` - -The map containing all member names and values for the generated class. - -* `def buildInfoClassName: String`, default: `BuildInfo` - -The name of the class that will contain all the members from -`buildInfoMembers`. - -* `def buildInfoPackageName: Option[String]`, default: `None` - -The package name for the generated class. - - -== Kotlin - -https://kotlinlang.org/[Kotlin] compiler support for mill. - -Project home: https://github.com/lefou/mill-kotlin - -[source,scala] ----- -// Load the plugin from Maven Central via ivy/coursier -import $ivy.`de.tototec::de.tobiasroeser.mill.kotlin_mill0.9:0.2.0` - -import mill._ -import mill.scalalib._ -import mill.define._ - -import de.tobiasroeser.mill.kotlin._ - -object main extends KotlinModule { - - // Select the Kotlin version - def kotlinVersion = "1.4.21" - - // Set additional Kotlin compiler options, e.g. the language level and annotation processor - // Run `mill main.kotlincHelp` to get a list of supported options - def kotlincOptions = super.kotlincOptions() ++ Seq("-verbose") - -} ----- - -For documentation please visit the https://github.com/lefou/mill-kotlin[mill-kotlin project page]. 
- -You will find there also a version compatibility matrix. - -== MDoc - -Simple MDoc runner for Mill - -This plugin provides a mill module that allows to execute https://scalameta.org/mdoc/[Scala MDoc] from within a mill build. -Scala MDoc simply compiles properly marked Scala snippets in plain md files and optionally runs them through an interpreter, augmenting the code with the interpreter output. - -Project home: https://github.com/atooni/mill-mdoc - -== `millw` / `millw.bat` - Mill Wrapper Scripts - -Small script to automatically fetch and execute mill build tool. - -Project home: https://github.com/lefou/millw - -`millw` is a small wrapper script around mill and works almost identical to -mill, but with additional features and compatibility with Windows. It -automatically downloads a mill release into `$HOME/.mill/download`. - -== MiMa - -Check binary compatibility with mill. - -Project home: https://github.com/lolgab/mill-mima - - -Just mix-in the `Mima` trait into your `ScalaModule`. -And set the previous artifacts you want to check binary compatibility. - -[source,scala] ----- -import mill._, scalalib._ - -import $ivy.`com.github.lolgab::mill-mima_mill0.9:0.0.2` -import com.github.lolgab.mill.mima._ - -object main extends ScalaModule with Mima { - - def mimaPreviousArtifacts = Agg( - ivy"my_group_id::main:my_previous_version" - ) - - // other settings ... - -} ----- - -You can then check the binary compatibility of the module with: - -[source,bash] ----- -> mill main.mimaReportBinaryIssues -Binary compatibility check passed. ----- - -== Missinglink - -https://github.com/spotify/missinglink[missinglink] check for Mill, ported from https://github.com/scalacenter/sbt-missinglink[sbt-missinglink]. 
- -Project home: https://github.com/hoangmaihuy/mill-missinglink - -_build.sc_: -[source,scala] ----- -import $ivy.`io.github.hoangmaihuy::mill-missinglink::` -import io.github.hoangmaihuy.missinglink._ - -object example extends MissinglinkCheckModule ----- - -Runtime missinglink check command - -[source,shell script] ----- -> mill example.missinglinkCheck ----- - -== Native-Image - -Build GraalVM Native-Image binaries with mill. - -Project home: https://github.com/alexarchambault/mill-native-image - -Import the plugin and add the `NativeImage` trait to your module and set some -configuration options: - -[source,scala] ----- -import $ivy.`io.github.alexarchambault.mill::mill-native-image::0.1.25` -import io.github.alexarchambault.millnativeimage.NativeImage - -object hello extends ScalaModule with NativeImage { - def scalaVersion = "3.3.0" - def ivyDeps = ... // Your deps here - - def nativeImageName = "hello" - def nativeImageMainClass = "Main" - def nativeImageGraalVmJvmId = "graalvm-java17:22.3.2" - def nativeImageClassPath = runClasspath() - def nativeImageOptions = Seq( - "--no-fallback", - "--enable-url-protocols=http,https", - "-Djdk.http.auth.tunneling.disabledSchemes=", - ) ++ (if (sys.props.get("os.name").contains("Linux")) Seq("--static") else Seq.empty) -} ----- - -Then run the `nativeImage` task to build the native-image binary. - -[source,bash] ----- -> ./mill hello.nativeImage -... 
------------------------------------------------------------------------------------------------------------------------- - 5.9s (4.9% of total time) in 32 GCs | Peak RSS: 5.71GB | CPU load: 5.84 ------------------------------------------------------------------------------------------------------------------------- -Produced artifacts: - /Users/myuser/repos/scala/mill-native-image/example/out/hello/nativeImage.dest/hello (executable -) - /Users/myuser/repos/scala/mill-native-image/example/out/hello/nativeImage.dest/hello.build_artifacts.txt (txt) -======================================================================================================================== -Finished generating 'hello' in 2m 0s. ----- - -For more configuration options, building binaries inside Docker, a sample project -and more, check the project readme. - -== OSGi - -Produce OSGi Bundles with mill. - -Project home: https://github.com/lefou/mill-osgi - - -[source,scala] ----- -import mill._, mill.scalalib._ -import $ivy.`de.tototec::de.tobiasroeser.mill.osgi:0.0.5` -import de.tobiasroeser.mill.osgi._ - -object project extends ScalaModule with OsgiBundleModule { - - def bundleSymbolicName = "com.example.project" - - def osgiHeaders = T{ super.osgiHeaders().copy( - `Export-Package` = Seq("com.example.api"), - `Bundle-Activator` = Some("com.example.internal.Activator") - )} - - // other settings ... - -} ----- - -== PowerShell Completion - -Basic PowerShell completion support. - -Project home: https://github.com/sake92/mill-powershell-completion - - -== PublishM2 - -_Since Mill `0.6.1-27-f265a4` there is a built-in `publishM2Local` target in `PublishModule`._ - -Mill plugin to publish artifacts into a local Maven repository. - -Project home: https://github.com/lefou/mill-publishM2 - - -Just mix-in the `PublishM2Module` into your project. -`PublishM2Module` already extends mill's built-in `PublishModule`. 
- -File: `build.sc` - -[source,scala] ----- -import mill._, scalalib._, publish._ - -import $ivy.`de.tototec::de.tobiasroeser.mill.publishM2:0.0.1` -import de.tobiasroeser.mill.publishM2._ - -object project extends PublishModule with PublishM2Module { - // ... -} ----- - -Publishing to default local Maven repository - -[source,bash] ----- -> mill project.publishM2Local -[40/40] project.publishM2Local -Publishing to /home/user/.m2/repository ----- - -Publishing to custom local Maven repository - -[source,bash] ----- -> mill project.publishM2Local /tmp/m2repo -[40/40] project.publishM2Local -Publishing to /tmp/m2repo ----- - -== Rust JNI - -A plugin for build Rust JNI code! - -Project home: https://github.com/otavia-projects/mill-rust-jni - -For documentation please visit the https://github.com/otavia-projects/mill-rust-jni[mill-rust-jni project page]. - -== ScalablyTyped - -https://scalablytyped.org/[Scalablytyped] support for mill. - -Project home: https://github.com/lolgab/mill-scalablytyped - - -Mix-in the `ScalablyTyped` trait into your `ScalaJSModule` and -set-up a `package.json` file with your TypeScript dependencies. - -[source,scala] ----- -import mill._, scalalib._ - -import $ivy.`com.github.lolgab::mill-scalablytyped::0.0.2` -import com.github.lolgab.mill.scalablytyped._ - -object main extends ScalaJSModule with ScalablyTyped { - - // other settings ... - -} ----- - -It will run ScalablyTyped and add the converted dependencies to the module's `ivyDeps`. 
- -== Scala TSI - -https://github.com/scala-tsi/scala-tsi[scala-tsi] support for Mill - -Project home: https://github.com/hoangmaihuy/mill-scala-tsi - -_build.sc_: - -[source,scala] ----- -import $ivy.`io.github.hoangmaihuy::mill-scala-tsi::` - -import io.github.hoangmaihuy.scalatsi._ - -object example extends ScalaModule with ScalaTsiModule { -// The classes that you want to generate typescript interfaces for -override def typescriptExports = Seq("MyClass") -// The output file which will contain the typescript interfaces -override def typescriptOutputFile = millSourcePath / "model.ts" -// Include the package(s) of the classes here -// Optionally import your own TSType implicits to override default default generated -override def typescriptGenerationImports = Seq("mymodel._", "MyTypescript._") -} ----- - -_MyClass.scala_: -[source,scala] ----- -case class MyClass(foo: String, bar: Int) ----- - -Generate Typescript command: - -[source,shell script] ----- -> mill example.generateTypescript ----- - -_model.ts_: -[source] ----- -export interface IMyClass { - foo: string - bar: number -} ----- - -== Scalafix - -https://scalacenter.github.io/scalafix/[Scalafix] support for mill. - -Project home: https://github.com/joan38/mill-scalafix - -_build.sc_: - -[source,scala] ----- -import $ivy.`com.goyeau::mill-scalafix:` -import com.goyeau.mill.scalafix.ScalafixModule -import mill.scalalib._ - -object project extends ScalaModule with ScalafixModule { - def scalaVersion = "2.12.11" -} ----- - -[source,shell script] ----- -> mill project.fix -[29/29] project.fix -/project/project/src/MyClass.scala:12:11: error: [DisableSyntax.var] mutable state should be avoided - private var hashLength = 7 - ^^^ -1 targets failed -project.fix A Scalafix linter error was reported ----- - -== SCIP (SCIP Code Intelligence Protocol) - -Support for generating https://about.sourcegraph.com/blog/announcing-scip[SCIP] -indexes from your Mill build. 
This is most commonly used to power intelligent -code navigation on https://sourcegraph.com/[Sourcegraph]. - -Project home: https://github.com/ckipp01/mill-scip - - -The recommended way to use `mill-scip` is via the -https://sourcegraph.github.io/scip-java/[`scip-java`] cli tool that can be -installed via https://get-coursier.io/[Coursier]. - -[source, shell script] ----- -cs install scip-java ----- - -Once you have `scip-java` installed the following command and the root of your -Mill build will generate an index and place it at the root of your project. - -[source, shell script] ----- -scip-java index ----- - -You can also manually trigger this with Mill by doing the following: - -[source, shell script, subs="attributes,verbatim"] ----- - -mill --import ivy:io.chris-kipp::mill-scip::{mill-scip-version} io.kipp.mill.scip.Scip/generate ----- - -This will then generate your `index.scip` inside of -`out/io/kipp/mill/scip/Scip/generate.dest/`. - -== Shell Completions - -As Mill is a tool often used from the CLI (Command line interface), you may be also interested in installing some completion support for your preferred shell: - -* <<_bash_completion>> -* <<_fish_completion>> -* <<_zsh_completion>> - - -== Spring Boot - -Support packaging Spring Boot Applications with Mill. - -Project home: https://github.com/lefou/mill-spring-boot - -[source,scala,subs="attributes,verbatim"] ----- -import mill._ -import mill.scalalib._ -import de.tobiasroeser.mill.spring.boot.SpringBootModule - -object app extends MavenModule with SpringBootModule { - override def springBootToolsVersion = "2.7.13" -} ----- - -[source,shell] ----- -# Package as executable Spring-Boot Application -$ mill app.springBootAssembly ----- - -== Universal Packager - -Support universal archive packaging for Java application with Mill, ported from sbt-native-packager. 
- -Project home: https://github.com/hoangmaihuy/mill-universal-packager - -[source,scala,subs="attributes,verbatim"] ----- -// build.sc -import $ivy.`io.github.hoangmaihuy::mill-universal-packager::` - -import io.github.hoangmaihuy.mill.packager.archetypes.JavaAppPackagingModule - -object example extends JavaAppPackagingModule { - override def packageVersion = "0.1.0" -} ----- - -[source,shell] ----- -# Package as zip archive with Bash start script -$ mill example.universalPackage ----- - -== VCS Version - -Mill plugin to derive a version from (last) git tag and edit state. It may support other VCS as well. - -Project home: https://github.com/lefou/mill-vcs-version - -Lots of formatting options are provided. -When used with its defaults, the outcome is identical to the version scheme that Mill itself uses. - - -[source,scala] ----- -import mill._ -import mill.scalalib._ - -// Load the plugin from Maven Central via ivy/coursier -import $ivy.`de.tototec::de.tobiasroeser.mill.vcs.version::0.1.2` -import de.tobiasroeser.mill.vcs.version.VcsVersion - -object main extends JavaModule with PublishModule { - override def publishVersion: T[String] = VcsVersion.vcsState().format() -} ----- - -== Zsh Completion - -Limited zsh completion support. - -This plugin adds ZSH shell completions to Mill. - -Project home: https://github.com/carlosedp/mill-zsh-completions diff --git a/docs/modules/ROOT/pages/Using_Plugins.adoc b/docs/modules/ROOT/pages/Using_Plugins.adoc deleted file mode 100644 index ef0c25b0b34..00000000000 --- a/docs/modules/ROOT/pages/Using_Plugins.adoc +++ /dev/null @@ -1,71 +0,0 @@ -= Using Plugins - -Mill plugins are ordinary jars and are loaded as any other external dependency with the xref:Import_File_And_Import_Ivy.adoc[`import $ivy` mechanism]. - -There exist a large number of Mill plugins, Many of them are available on GitHub and via Maven Central. 
We also have a list of plugins, which is most likely not complete, but it might be a good start if you are looking for plugins: xref:Thirdparty_Plugins.adoc[]. - -Some plugin contributions are also hosted in Mill's own git tree as xref:Contrib_Plugins.adoc[]. - -Mill plugins are typically bound to a specific version range of Mill. -This is called the binary platform. -To ease the use of the correct versions and avoid runtime issues (caused by binary incompatible plugins, which are hard to debug) you can apply one of the following techniques: - -== Use the specific Mill Binary Platform notation - -[source,scala] ----- -// for classic Scala dependencies -import $ivy.`::::` // <1> -// for dependencies specific to the exact Scala version -import $ivy.`:::::` // <2> ----- -<1> This is equivalent to -+ -[source,scala] ----- -import $ivy.`::_mill$MILL_BIN_PLATFORM:` ----- -<2> This is equivalent to -+ -[source,scala] ----- -import $ivy.`:::_mill$MILL_BIN_PLATFORM:` ----- - - -== Use special placeholders in your `import $ivy` - -`$MILL_VERSION` :: -+ --- -to substitute the currently used Mill version. -This is typical required for Mill contrib modules, which are developed in the Mill repository and highly bound to the current Mill version. - -.Example: Use `mill-contrib-bloop` plugin matching the current Mill version ----- -import $ivy.`com.lihaoyi:mill-contrib-bloop:$MILL_VERSION` ----- - -There is the even more convenient option to leave the version completely empty. -Mill will substitute it with its current version. -But don't forget to provide the trailing colon! - -.Example: Use `mill-contrib-bloop` plugin matching the current Mill version ----- -import $ivy.`com.lihaoyi:mill-contrib-bloop:` ----- --- - -`$MILL_BIN_PLATFORM` :: -+ --- -to substitute the currently used Mill binary platform. 
.Example: Using `mill-vcs-version` plugin matching the current Mill Binary Platform
+ +==== Relevant Modules + +These are the main Mill Modules that are relevant for building Android apps: + +* {mill-doc-url}/api/latest/mill/javalib/android/AndroidSdkModule.html[`mill.javalib.android.AndroidSdkModule`]: Handles Android SDK management and tools. +* {mill-doc-url}/api/latest/mill/javalib/android/AndroidAppModule.html[`mill.javalib.android.AndroidAppModule`]: Provides a framework for building Android applications. +* {mill-doc-url}/api/latest/mill/scalalib/JavaModule.html[`mill.javalib.JavaModule`]: General Java build tasks like compiling Java code and creating JAR files. + +== Simple Android Hello World Application + +include::partial$example/android/javalib/1-hello-world.adoc[] + +This example demonstrates how to create a basic "Hello World" Android application +using the Mill build tool. It outlines the minimum setup required to compile Java code, +package it into an APK, and run the app on an Android device. + +== Understanding `AndroidSdkModule` and `AndroidAppModule` + +The two main modules you need to understand when building Android apps with Mill +are `AndroidSdkModule` and `AndroidAppModule`. + +`AndroidSdkModule`: + +* This module manages the installation and configuration of the Android SDK, which includes +tools like `aapt`, `d8`, `zipalign`, and `apksigner`. These tools are used +for compiling, packaging, and signing Android applications. + +`AndroidAppModule`: +This module provides the step-by-step workflow for building an Android app. It handles +everything from compiling the code to generating a signed APK for distribution. + +1. **Compiling Java code**: The module compiles your Java code into `.class` files, which is the first step in creating an Android app. +2. **Packaging into JAR**: It then packages the compiled `.class` files into a JAR file, which is necessary before converting to Android's format. +3. 
**Converting to DEX format**: The JAR file is converted into DEX format, which is the executable format for Android applications. +4. **Creating an APK**: The DEX files and Android resources (like layouts and strings) are packaged together into an APK file, which is the installable file for Android devices. +5. **Optimizing with zipalign**: The APK is optimized using `zipalign` to ensure better performance on Android devices. +6. **Signing the APK**: Finally, the APK is signed with a digital signature, allowing it to be distributed and installed on Android devices. + +After creating Simple Android Application now let's focus on how to create Android App Bundle Using Mill Build Tool + +== Android App Bundle + +include::partial$example/android/javalib/2-app-bundle.adoc[] + +== Understanding `AndroidAppBundle` + +The `AndroidAppBundle` trait is used to create and manage Android App Bundles (AAB) in Mill. It provides tasks for creating, building, and signing an AAB from Android resources and DEX files. + +* {mill-doc-url}/api/latest/mill/javalib/android/AndroidAppBundle.html[`mill.javalib.android.AndroidAppBundle`]: Provides a framework for building Android App Bundle. + +==== Key Functions + +- **androidAaptOptions:** Here, Overrides `androidAaptOptions` to add the `--proto-format` option to AAPT commands, enabling protocol buffer format for assets. + +- **androidBundleZip:** Creates a zip archive containing: `Compiled DEX files`, `Resources`, `libraries`, and `assets`, The `Android manifest`. + This zip follows the Android App Bundle format, as outlined in the official documentation. + +- **androidUnsignedBundle:** Uses the `bundleTool` to build an unsigned AAB from the bundle zip. + +- **androidBundle:** Signs the AAB using a specified keystore with the `jarsigner` tool, producing a signed Android App Bundle (AAB). 
diff --git a/docs/modules/ROOT/pages/android/kotlin.adoc b/docs/modules/ROOT/pages/android/kotlin.adoc new file mode 100644 index 00000000000..64dc36270db --- /dev/null +++ b/docs/modules/ROOT/pages/android/kotlin.adoc @@ -0,0 +1,46 @@ += Android Kotlin Projects +:page-aliases: android_app_kotlin_examples.adoc + +include::partial$gtag-config.adoc[] + +This page provides an example of using Mill as a build tool for Android applications. +This workflow is still pretty rough and nowhere near production ready, but can serve as +a starting point for further experimentation and development. + +=== Relevant Modules + +These are the main Mill Modules that are relevant for building Android apps: + +* {mill-doc-url}/api/latest/mill/javalib/android/AndroidSdkModule.html[`mill.javalib.android.AndroidSdkModule`]: Handles Android SDK management and tools. +* {mill-doc-url}/api/latest/mill/kotlinlib/android/AndroidAppKotlinModule.html[`mill.kotlinlib.android.AndroidAppKotlinModule`]: Provides a framework for building Android applications. +* {mill-doc-url}/api/latest/mill/kotlinlib/KotlinModule.html[`mill.kotlinlib.KotlinModule`]: General Kotlin build tasks like compiling Kotlin code and creating JAR files. + +== Simple Android Hello World Application + +include::partial$example/android/kotlinlib/1-hello-kotlin.adoc[] + +This example demonstrates how to create a basic "Hello World" Android application +using the Mill build tool. It outlines the minimum setup required to compile Kotlin code, +package it into an APK, and run the app on an Android device. + +== Understanding `AndroidSdkModule` and `AndroidAppKotlinModule` + +The two main modules you need to understand when building Android apps with Mill +are `AndroidSdkModule` and `AndroidAppKotlinModule`. + +`AndroidSdkModule`: + +* This module manages the installation and configuration of the Android SDK, which includes +tools like `aapt`, `d8`, `zipalign`, and `apksigner`. 
These tools are used +for compiling, packaging, and signing Android applications. + +`AndroidAppKotlinModule`: +This module provides the step-by-step workflow for building an Android app. It handles +everything from compiling the code to generating a signed APK for distribution. + +1. **Compiling Kotlin code**: The module compiles your Kotlin code into `.class` files, which is the first step in creating an Android app. +2. **Packaging into JAR**: It then packages the compiled `.class` files into a JAR file, which is necessary before converting to Android's format. +3. **Converting to DEX format**: The JAR file is converted into DEX format, which is the executable format for Android applications. +4. **Creating an APK**: The DEX files and Android resources (like layouts and strings) are packaged together into an APK file, which is the installable file for Android devices. +5. **Optimizing with zipalign**: The APK is optimized using `zipalign` to ensure better performance on Android devices. +6. **Signing the APK**: Finally, the APK is signed with a digital signature, allowing it to be distributed and installed on Android devices. diff --git a/docs/modules/ROOT/pages/cli/builtin-commands.adoc b/docs/modules/ROOT/pages/cli/builtin-commands.adoc new file mode 100644 index 00000000000..577748c5963 --- /dev/null +++ b/docs/modules/ROOT/pages/cli/builtin-commands.adoc @@ -0,0 +1,10 @@ += Built-in Commands +:page-aliases: Scala_Builtin_Commands.adoc, + +include::partial$gtag-config.adoc[] + +:language: Scala +:language-small: scala + + +include::partial$example/cli/builtins/1-builtin-commands.adoc[] diff --git a/docs/modules/ROOT/pages/cli/flags.adoc b/docs/modules/ROOT/pages/cli/flags.adoc new file mode 100644 index 00000000000..025972be24d --- /dev/null +++ b/docs/modules/ROOT/pages/cli/flags.adoc @@ -0,0 +1,224 @@ += Mill Command-Line Flags + +When running Mill, keep in mind there are often _four_ things that can take command-line +flags or options: + +1. 
The task you are running, e.g. `foo.run --text hello`. These are passed directly from the + command line + +2. The JVM running the *task* e.g. `foo.run`, which may take flags e.g. `java -Xss10m -Xmx10G`. + These are configured using xref:javalib/module-config.adoc#_compilation_execution_flags[Compilation and Execution Flags] + +3. The Mill build tool process, e.g. `./mill --jobs 10`. These can be passed directly after the + `./mill` executable name, or set in a `.mill-opts` file as shown below in + xref:#_repo_level_mill_options[Repo-Level Mill Options] + +4. The JVM running the Mill build tool process, which may take flags. e.g. + `java -Xss10m -Xmx10G`. These are passed via `JAVA_OPTs` or via a `.mill-jvm-opts` flag + as shown below in xref:#_running_mill_with_custom_jvm_options[Mill with custom JVM options] + +This can get confusing, so when you want to pass some flags to a Mill task, to Mill, or to +the JVM (which one?) be clear who you want to pass the flags to so you can ensure they go +to the right place. + +== Mill Flags Listing + +To see a cheat sheet of all the command line flags that Mill supports, you can use `./mill --help`: + +```scala +$ ./mill --help +Mill Build Tool, version 0.12.5-13-0e02e6 +Usage: mill [options] task [task-options] [+ task ...] 
+ +task cheat sheet: + mill resolve _ # see all top-level tasks and modules + mill resolve __.compile # see all `compile` tasks in any module (recursively) + + mill foo.bar.compile # compile the module `foo.bar` + + mill foo.run --arg 1 # run the main method of the module `foo` and pass in `--arg 1` + mill -i foo.console # run the Scala console for the module `foo` (if it is a ScalaModule) + + mill foo.__.test # run tests in modules nested within `foo` (recursively) + mill foo.test arg1 arg2 # run tests in the `foo` module passing in test arguments `arg1 arg2` + mill foo.test + bar.test # run tests in the `foo` module and `bar` module + mill '{foo,bar,qux}.test' # run tests in the `foo` module, `bar` module, and `qux` module + + mill foo.assembly # generate an executable assembly of the module `foo` + mill show foo.assembly # print the output path of the assembly of module `foo` + mill inspect foo.assembly # show docs and metadata for the `assembly` task on module `foo` + + mill clean foo.assembly # delete the output of `foo.assembly` to force re-evaluation + mill clean # delete the output of the entire build to force re-evaluation + + mill path foo.run foo.sources # print the task chain showing how `foo.run` depends on `foo.sources` + mill visualize __.compile # show how the `compile` tasks in each module depend on one another + +options: + -D --define Define (or overwrite) a system property. + --allow-positional Allows command args to be passed positionally without `--arg` by default + -b --bell Ring the bell once if the run completes successfully, twice if it fails. + --bsp Enable BSP server mode. + --color Toggle colored output; by default enabled only if the console is interactive + and NO_COLOR environment variable is not set + -d --debug Show debug output on STDOUT + --disable-callgraph Disables fine-grained invalidation of tasks based on analyzing code changes. + If passed, you need to manually run `clean` yourself after build changes. 
+ --disable-prompt Disables the new multi-line status prompt used for showing thread status at + the command line and falls back to the legacy ticker + --help Print this help message and exit. + -i --interactive Run Mill in interactive mode, suitable for opening REPLs and taking user + input. This implies --no-server. Must be the first argument. + --import Additional ivy dependencies to load into mill, e.g. plugins. + -j --jobs The number of parallel threads. It can be an integer e.g. `5` meaning 5 + threads, an expression e.g. `0.5C` meaning half as many threads as available + cores, or `C-2` meaning 2 threads less than the number of cores. `1` disables + parallelism and `0` (the default) uses 1 thread per core. + -k --keep-going Continue build, even after build failures. + --meta-level Select a meta-level to run the given tasks. Level 0 is the main project in + `build.mill`, level 1 the first meta-build in `mill-build/build.mill`, etc. + --no-server Run without a background server. Must be the first argument. + -s --silent Make ivy logs during script import resolution go silent instead of printing + --ticker Enable ticker log (e.g. short-lived prints of stages and progress bars). + -v --version Show mill version information and exit. + -w --watch Watch and re-run the given tasks when when their inputs change. + task ... The name or a pattern of the tasks(s) you want to build. + +Please see the documentation at https://mill-build.org for more details +``` + +== Notable Flags + +This section covers some of the flags that are worth discussing in more detail + +=== `--interactive`/`-i`/`--no-server` + +This flag is necessary to run any interactive terminal programs using Mill: things like +`ScalaModule#console`, `ScalaModule#repl`, and so on. + +By default, Mill runs tasks in a long-lived background server. 
While this is good for +performance (as it avoids paying the server startup time each command), it is incompatible +with tasks like `.repl` which require a direct `stdin`/`stdout` forwarding connection to +the user's terminal. `--interactive`/`-i` instead runs tasks in a short-lived background +server with proper port pipe forwarding configured, which enables tasks like `.repl` to run +while paying a somewhat higher startup overhead. + + + +=== `--watch`/`-w` + +You can use the `--watch` flag to make Mill watch a task's inputs, +re-evaluating the task as necessary when the inputs +change: + +[source,bash] +---- +$ mill --watch foo.compile +$ mill --watch foo.run +$ mill -w foo.compile +$ mill -w foo.run +---- + +Mill's `--watch` flag watches both the files you are building using Mill, as +well as Mill's own `build.mill` file and anything it imports, so any changes to +your `build.mill` will automatically get picked up. + +For long-running processes like web servers, you can use `runBackground` to make sure they recompile and restart when code changes, +forcefully terminating the previous process even though it may be still alive: + +[source,bash] +---- +$ mill -w foo.runBackground +---- + + +=== `--jobs`/`-j` + +By default, Mill will evaluate all tasks in parallel, with the number of concurrent +tasks equal to the number of cores on your machine. You can use the `--jobs` (`-j`) flag to configure +explicitly how many concurrent tasks you wish to run. To disable parallel execution use `-j1`. + +Example: Use up to 4 parallel threads to compile all modules: + +[source,bash] +---- +mill -j4 __.compile +---- + +You can also set Mill's parallelism to some multiple of the number of cores, e.g. +`-j0.5C` to use half as many threads as cores, or `-j2C` to use twice as many threads as cores. +These can be useful as xref:_repo_level_mill_options[] to configure an appropriate level +of parallelism that scales based on the number of cores available (which might differ +between e.g.
developer laptops and CI machines) + +Every `mill` run generates an output file in `out/mill-chrome-profile.json` that can be +loaded into the Chrome browser's `chrome://tracing` page for visualization. +This can make it much easier to analyze your parallel runs to find out what's +taking the most time: + +image::basic/ChromeTracing.png[ChromeTracing.png] + +Note that the maximal possible parallelism depends both on the number of cores +available as well as the task and module structure of your project, as tasks that +depend on one another cannot be processed in parallel + + +== Repo-Level Mill Options + +Mill supports the `.mill-opts` file for passing a default set of command line +options to Mill itself. For example, if your project's tasks are CPU heavy, you +may want everyone using your project to run only 0.5 concurrent tasks per CPU. This +can be done by setting `.mill-opts` to: + +_.mill-opts_ +---- +--jobs=0.5C +---- + +The file name `.mill-opts` can be overridden via the `MILL_OPTS_PATH` environment variable. +You can also pass in flags like `--jobs=10` explicitly to override the value passed in +`.mill-opts`. + +NOTE: `.mill-jvm-opts` is for passing JVM options to the JVM running Mill, +and `.mill-opts` is for passing options to Mill itself. If you want to pass JVM options +to the project that Mill is building and running, see the section on +xref:javalib/module-config.adoc#_compilation_execution_flags[Compilation and Execution Flags]. + +== Running Mill with custom JVM options + +It's possible to pass JVM options to the Mill launcher. To do this you can either set +the `JAVA_OPTS` environment variable, or create a `.mill-jvm-opts` file in your project's +root that contains JVM options one per line.
+ +For example, if your build requires a lot of memory and bigger stack size, you could run + +```bash +> JAVA_OPTS='-Xss10m -Xmx10G' ./mill __.compile +``` + +Or you could create a `.mill-jvm-opts`: + +_.mill-jvm-opts_ +---- +-Xss10m +-Xmx10G +---- + +Note that `.mill-jvm-opts` requires each CLI token to be on a separate line, so +`-Xss10m -Xmx10G` on a single line is not allowed (as it would pass `"-Xss10m -Xmx10G"` +as a single token and fail argument parsing) + +`.mill-jvm-opts` also supports environment variable interpolation, e.g. + +_.mill-jvm-opts_ +---- +# PWD on mac/linux +-Dmy.jvm.property=${PWD} +---- + +Missing environment variables are +converted to the empty string. + +The file name `.mill-jvm-opts` can be overridden via the `MILL_JVM_OPTS_PATH` environment +variable. + diff --git a/docs/modules/ROOT/pages/cli/installation-ide.adoc b/docs/modules/ROOT/pages/cli/installation-ide.adoc new file mode 100644 index 00000000000..43501cb29a7 --- /dev/null +++ b/docs/modules/ROOT/pages/cli/installation-ide.adoc @@ -0,0 +1,414 @@ += Installation & IDE Setup + +The standard method of installing Mill is to install a `./mill` <<_bootstrap_scripts,bootstrap script>>, +similar to `./gradlew` or `./mvnw` in other build tools. +This script can determine the best version to be used by a project (e.g. by +reading a `.mill-version`) and will use this exact Mill version. +If the determined Mill version is not installed locally, it will be downloaded automatically. + +For all the examples in this documentation, there is a `download` link that provides +a zip file containing the full example ready to use. These examples come with a `./mill` +script you can use to immediately begin working with the project, needing only a JVM installed +globally. 
+ +[#_bootstrap_scripts] +== Bootstrap Scripts + +Although the Mill example projects come with their own `./mill` and `./mill.bat` bootstrap script, +you can also download it manually: + +[source,bash,subs="verbatim,attributes"] +---- +# Mac/Linux +curl -L {mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag} -o mill +chmod +x mill +echo {mill-last-tag} > .mill-version + +# Windows +curl -L {mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag}.bat -o mill.bat +echo {mill-last-tag} > .mill-version +---- + +Downloading a `mill` bootstrap script to the root of your project repository helps make it easier for +new contributors to build your project, as they don't have to install Mill before they can start. +Anyone who wants to work with the project can simply use the `./mill` script directly. + +[source,bash] +---- +./mill --version +./mill __.compile # double underscore +---- + + +In general, bootstrap scripts are the recommended way of installing Mill. +Similar to `./gradlew` or `./mvnw`, the `./mill` bootstrap script +reduces the chance of errors due to the installed version of Mill +being incompatible with the version expected by your build. +In-project bootstrap scripts are also useful for running Mill in CI/CD, ensuring +that your build server like Jenkins or Github Actions has the correct version of Mill +present to build, compile or test your code. + +If you are starting a new project using a Mill bootstrap script, you can use the +xref:cli/builtin-commands.adoc#_init[mill init] to initialize the project +folder with one of the Mill example projects. There are a wide range of example projects, +from hello-world to multi-module libraries to client-server web applications, and you can +pick one most similar to what you are doing so you can hit the ground running working. 
+ +=== Mill Native Executable + +The default Mill executable configured above requires a JVM (11 or above) installed globally in +order to run, and imposes some Java startup overhead (100-200ms) when running Mill from the +command line.You can also use Mill's native executables by appending a `-native` suffix to the +Mill version: + +[source,bash,subs="verbatim,attributes"] +---- +echo {mill-last-tag}-native > .mill-version +---- + +Using the `-native` suffix should provide a faster CLI experience than using Mill's default +JVM launcher. Mill native executables are supported on the following OS/CPU combinations: + +- `windows-amd64` (Intel Windows) +- `linux-amd64` (Intel Linux) +- `mac-aarch64` (M1-M4 Mac) + +If you are on a combination not supported by Mill's native executables, use the default +JVM launcher instead. + +=== Customizing Mill's JVM + +To use Mill in environments without a JVM installed, the native launcher can download its +own JVM as necessary. You need to specify the version you want via a `.mill-jvm-version` +file such as: + +```bash +echo temurin:17.0.6 > .mill-jvm-version +``` + +`.mill-jvm-version` can also be used with Mill's default JVM launcher, if you want +your Mill process to use a different JVM than what you have installed globally. + + +== IDE Support + +:link-metals: https://scalameta.org/metals/ + +Mill supports IntelliJ and VSCode and in general any client of the standard +https://build-server-protocol.github.io/[Build Server Protocol (BSP)]. + +To prepare your project for IDEs, and in general any BSP client, you can run this command to generate the BSP configuration files: + +[source,bash] +---- +./mill mill.bsp.BSP/install +---- + +Your IDEs may already auto-detect the Mill project and run this command on behalf of you, when opening/importing the project. + +=== IntelliJ + +To use Mill with IntelliJ, first ensure you have the free +https://plugins.jetbrains.com/plugin/1347-scala[IntelliJ Scala Plugin] +installed. 
This is necessary as Mill build files are written in Scala, +even if you are using it to build a Java or Kotlin project. + +Once you have the plugin installed, you can use IntelliJ to open any project +containing a Mill `build.mill` file, and IntelliJ will automatically load the +Mill build. If you have multiple build systems installed, Intellij may give you a +choice of which build system configuration to use for the import, in which case select `BSP`: + +image::basic/IntellijSelectBsp.png[] + + + +This will provide support both for your application code, +as well as the code in the `build.mill`: + +image::basic/IntellijApp.png[] + +image::basic/IntellijBuild.png[] + +If IntelliJ does not highlight the `.mill` files correctly, you can explicitly enable +it by adding `*.mill` to the `Scala` file type: + +image::basic/IntellijFileTypeConfig.png[] + +If you make changes to your Mill `build.mill`, you can ask Intellij to load +those updates by opening the "BSP" tab and clicking the "Refresh" button + +image::basic/IntellijRefresh.png[] + +==== IntelliJ IDEA XML Support + +Apart from using the Build Server Protocol, you can also generate IDEA project +files directly with Mill. This is probably the preferred way if you work on +polyglot projects and need support for frameworks like AspectJ, +which are currently not specifically configured over BSP. + +To generate IntelliJ IDEA project files into `.idea/`, run: + +[source,bash] +---- +./mill mill.idea.GenIdea/ +---- + +This will generate the XML files IntelliJ uses to configure your project + +``` +.idea +.idea/scala_settings.xml +.idea/mill_modules +.idea/mill_modules/.iml +.idea/mill_modules/mill-build.iml +.idea/mill_modules/test.iml +.idea/libraries +.idea/libraries/mill_scalalib_2_13_0_11_10_jar.xml +... +.idea/workspace.xml +.idea/modules.xml +.idea/scala_compiler.xml +.idea/misc.xml +``` + +After the files are generated, you can open the folder in IntelliJ to load the project +into your IDE.
If you make changes to your Mill `build.mill`, you can update the project config +those updates by running `./mill mill.idea.GenIdea/` again. + +=== VSCode + +To use Mill with VSCode, first ensure you have the free +https://marketplace.visualstudio.com/items?itemName=scalameta.metals[Metals VSCode Scala language server] +installed. This is necessary as Mill build files are written in Scala, +even if you are using it to build a Java project. + +NOTE: Mill in VSCode only supports Java and Scala. Kotlin users are advised to use the free IntelliJ IDEA Community Edition + +Once you have the language server installed, you can ask VSCode to open any folder +containing a Mill `build.mill` file, and VSCode will ask you to import your +Mill build. This will provide support both for your application code, +as well as the code in the `build.mill`: + +image::basic/VSCodeApp.png[] + +image::basic/VSCodeBuild.png[] + +If you make changes to your Mill `build.mill`, you can ask VSCode to load +those updates by opening the "BSP" tab and clicking the "Refresh" button + +image::basic/VSCodeRefresh.png[] + +=== Other Editors / Metals + +A lot of other editors may work too, since {link-metals}[Metals], the Language Server for Scala has built-in support for BSP. See the <<_ide_support,general instructions>> above. + +=== Debugging IDE issues + +Mill's BSP IDE integration writes to a log file under +`.bsp/mill-bsp.stderr`, where you can find various information about what's +going on. It contains regular Mill output accompanied by additional BSP +client-server communication details. This can be useful to look at if your +IDE fails to import your Mill project + +== Updating Mill + +Typically, most Mill projects use a `.mill-version` file to configure what version +to use. You can update the version specified in this file in order to change the version +of Mill. The file path `.config/mill-version` is also supported. 
If neither is provided, +the `./mill` bootstrap script will use the `DEFAULT_MILL_VERSION` it has built in. + +To choose a different Mill version on an ad-hoc basis, e.g. for experimentation, you can pass +in a `MILL_VERSION` environment variable, e.g. + +[source,bash] +---- +MILL_VERSION=0.5.0-3-4faefb mill __.compile +---- + +or + +[source,bash] +---- +MILL_VERSION=0.5.0-3-4faefb ./mill __.compile +---- + +to override the Mill version manually. This takes precedence over the version +specified in `./mill`, `.config/mill-version` or `.mill-version` + +== Working without access to Maven Central + +Under some circumstances (e.g. corporate firewalls), you may not have access to Maven Central. +The typical symptom will be error messages which look like this: + +---- +1 tasks failed +mill.scalalib.ZincWorkerModule.classpath +Resolution failed for 1 modules: +-------------------------------------------- + com.lihaoyi:mill-scalalib-worker_2.13:0.11.1 + not found: C:\Users\partens\.ivy2\local\com.lihaoyi\mill-scalalib-worker_2.13\0.11.1\ivys\ivy.xml + download error: Caught java.io.IOException (Server returned HTTP response code: 503 for URL: https://repo1.maven.org/maven2/com/lihaoyi/mill-scalalib-worker_2.13/0.11.1/mill-scalalib-worker_2.13-0.11.1.pom) while downloading https://repo1.maven.org/maven2/com/lihaoyi/mill-scalalib-worker_2.13/0.11.1/mill-scalalib-worker_2.13-0.11.1.pom +---- + +It is expected that basic commands (e.g. clean) will not work, with Mill saying it is +unable to resolve its own fundamental dependencies. Under such circumstances, you +will normally have access to some proxy, or other corporate repository which resolves +maven artifacts. The strategy is simply to tell mill to use that instead. + +The idea is to set an environment variable COURSIER_REPOSITORIES (see coursier docs). +The below command should pass the environment variable to the `mill` command.
+ +---- + COURSIER_REPOSITORIES=https://packages.corp.com/artifactory/maven/ mill resolve _ +---- + +If you are using bootstrap script, a more permanent solution could be to set the environment variable +at the top of the bootstrap script, or as a user environment variable etc. + + + +== Automatic Mill updates + +If your project is hosted on GitHub, GitLab, or Bitbucket, you can use +https://github.com/scala-steward-org/scala-steward[Scala Steward] to +automatically open a pull request to update your Mill version (in +`.mill-version` or `.config/mill-version` file), whenever there is a newer version available. + +TIP: Scala Steward can also +xref:scalalib/dependencies.adoc#_keeping_up_to_date_with_scala_steward[scan your project dependencies] +and keep them up-to-date. + +== Development Releases + +In case you want to try out the latest features and improvements that are +currently in the main branch, unstable versions of Mill +are +https://github.com/com-lihaoyi/mill/releases[available] as binaries named +`+#.#.#-n-hash+` linked to the latest tag. + +The easiest way to use a development release is to use one of the +<<_bootstrap_scripts>>, which support overriding Mill versions via an +`MILL_VERSION` environment variable or a `.mill-version` or `.config/mill-version` file. + + + +== Other installation methods + +CAUTION: The installation methods listed below are maintained outside of Mill and may not have +the same features as the xref:cli/installation-ide.adoc#_bootstrap_scripts[bootstrap scripts]. You can try using them, +but the officially supported way to use Mill is via the bootstrap script above, so the Mill +maintainers may be unable to help you if you have issues with some alternate installation method. + +CAUTION: Some of the installations via package managers install a fixed version of Mill and +do not support project-specific selection of the preferred Mill version. 
If you want to use +the `MILL_VERSION` environment variable or need support for `.mill-version` or +`.config/mill-version` files to control the actually used Mill version, please use +a xref:cli/installation-ide.adoc#_bootstrap_scripts[bootstrap script] instead. + +=== OS X + +Installation via https://github.com/Homebrew/homebrew-core/blob/master/Formula/m/mill.rb[homebrew]: + +[source,sh] +---- +brew install mill +---- + + +=== Arch Linux + +Arch Linux has an https://archlinux.org/packages/extra/any/mill/[Extra package for mill]: + +[source,bash] +---- +pacman -S mill + +---- + +=== FreeBSD + +Installation via http://man.freebsd.org/pkg/8[pkg(8)]: + +[source,sh] +---- +pkg install mill + +---- + +=== Gentoo Linux + +[source,sh] +---- +emerge dev-java/mill-bin + +---- + +=== Windows + +To get started, download Mill from +{mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag}-assembly[Github releases], and save it as `mill.bat`. + +If you're using https://scoop.sh[Scoop] you can install Mill via + +[source,bash] +---- +scoop install mill +---- + +=== WSL / MSYS2 / Cygwin / Git-Bash + +Mill also works on "sh" environments on Windows (e.g., +https://www.msys2.org[MSYS2], +https://www.cygwin.com[Cygwin], +https://gitforwindows.org[Git-Bash], +https://docs.microsoft.com/en-us/windows/wsl[WSL]); to get started, follow the instructions in the <<_manual>> +section. Note that: + +* In some environments (such as WSL), Mill might have to be run without a server (using `-i`, `--interactive`, or `--no-server`).
+ +* On Cygwin, run the following after downloading mill: + +[source,bash] +---- +sed -i '0,/-cp "\$0"/{s/-cp "\$0"/-cp `cygpath -w "\$0"`/}; 0,/-cp "\$0"/{s/-cp "\$0"/-cp `cygpath -w "\$0"`/}' /usr/local/bin/mill +---- + +=== Docker + +You can download and run +a https://hub.docker.com/r/nightscape/scala-mill/["Docker image containing OpenJDK, Scala and Mill"] using + +[source,bash] +---- +docker pull nightscape/scala-mill +docker run -it nightscape/scala-mill +---- + +=== Manual + +To get started, download Mill and install it into your HOME ".local/bin" via the following +`curl`/`chmod` command: + +[source,bash,subs="verbatim,attributes"] +---- +sh -c "curl -L {mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag} > ~/.local/bin/mill && chmod +x ~/.local/bin/mill" +---- + +=== Coursier (unsupported) + +Installing mill via `coursier` or `cs` is currently not officially supported. +There are various issues, especially with interactive mode. + +=== Asdf (unsupported) + +You can install and manage Mill via the Multiple Runtime Version Manager - https://asdf-vm.com/[`asdf`]. + +Support by `asdf` is currently possible by using the https://github.com/asdf-community/asdf-mill[`asdf-mill` plugin]: + +.Steps to install the `mill` plugin and Mill with `asdf` +[source,bash] +--- +asdf plugin add mill +asdf install mill latest +asdf global mill latest +--- diff --git a/docs/modules/ROOT/pages/cli/query-syntax.adoc b/docs/modules/ROOT/pages/cli/query-syntax.adoc new file mode 100644 index 00000000000..7ca0ce9dd4f --- /dev/null +++ b/docs/modules/ROOT/pages/cli/query-syntax.adoc @@ -0,0 +1,151 @@ += Task Query Syntax +:page-aliases: Task_Query_Syntax.adoc + +include::partial$gtag-config.adoc[] + +When interacting with Mill from the CLI, you often need to select tasks or modules. 
+In most places, where Mill accepts a task, it really accepts a task selector query, which is the name of a task in its simplest form, but it can also contain wildcards, type patterns and other special syntax, making it a powerful tool to select specific tasks. + +== Selecting dedicated tasks + +When invoking Mill, the simplest way to run a task is to give its fully qualified name. + +Examples: + +---- +> mill foo.compile +> mill foo.run hello world +> mill foo.testCached +---- + +.Understanding task paths and path segments +**** + +Each Mill module and task has a unique path. +Each part of the path is called a _segment_. +Segments are separated with a dot (`.`). +They look like regular Scala class name qualifiers. + +There are two kinds of segments: _label segments_ and _cross segments_. + +_Label segments_ are the components of a task path and have the same restrictions as Scala identifiers. +They must start with a letter and may contain letters, numbers and a limited set of special characters `-` (dash), `_` (underscore). +They are used to denote Mill modules and tasks, or, in the case of xref:fundamentals/modules.adoc#_external_modules[external modules], their Scala package names. + +_Cross segments_ start with a label segment but contain additional square brackets (`[`, `]`) and are used to denote cross modules and their parameters. + +NOTE: Segments can be surrounded by parentheses (`(`, `)`). +When combined with <> which contain dots (`.`), the parentheses need to be used to avoid the dots being interpreted as path separators. + +**** + +[#select-multiple-tasks] +== Selecting multiple tasks + +If you want to select more than one task, you have multiple options: + +* <> +* <> +* <> +* <> + +You can also combine these techniques to properly select your tasks + +[#enumerations] +== Enumerations + +Enumerations are denoted by curly braces (`{`, `}`). +Inside the curly braces you can place two or more selector paths, separated with a comma (`,`).
+ +Examples: + +* `{foo,bar}` simply enumerates two tasks, `foo` and `bar` +* `foo.{compile,run}` expands to `foo.compile` and `foo.run` +* `+{_,foo.bar}.baz+` expands to `+_.baz+` and `foo.bar.baz` + +[TIP] +==== +Some Shells like `bash` support curly brace expansion. +Make sure to properly mask the selector path, e.g. by putting it in quotes. + +[bash] +---- +mill "foo.{compile,run}" +---- +==== + +[#wildcards] +== Wildcard selections + +There are two wildcards you can use as path segments. + +* `+_+` The single underscore acts as a placeholder for a single segment. + +* `+__+` The double underscore acts as a placeholder for many segments. +In particular, it can represent an empty segment. + +With wildcards, you can get explicit control over the position of a task in the build tree. + +E.g. the filter `+_._._.jar+` will match all jar tasks that are on the third level of the build tree. + +[#type-filters] +== Type filters for wildcard selections + +Type filters are always combined with a wildcard. +They are used to limit the scope of the wildcard to only match path segments of the specified types. +For module paths this means the represented module needs to be an instance of the specified type. + +A type filter always starts with a wildcard (`+_+`, `+__+`) followed by a colon (`:`) and finally the _type qualifier_. + +The type is matched by its name and optionally by its enclosing types and packages, separated by a `.` sign. +Since this is also used to separate task path segments, a type selector segment containing a `.` needs to be enclosed in parentheses. +A fully qualified type can be denoted with the `+_root_+` package. + +[sh] +---- +> mill resolve __:TestModule.jar +> mill resolve "(__:scalalib.TestModule).jar" +> mill resolve "(__:mill.scalalib.TestModule).jar" +> mill resolve "(__:_root_.mill.scalalib.TestModule).jar" +---- + +If the type qualifier starts with a `^` or `!`, it only matches types which are _not_ instances of the specified type.
+ +[sh] +---- +> mill resolve __:^TestModule.jar +---- + +You can also add more than one type filter to a wildcard. + +[sh] +---- +> mill resolve "__:JavaModule:^ScalaModule:^TestModule.jar" +---- + +NOTE: Type filters are currently only supported for module selections, but not for task selections. +That means you can't filter based on the result type of a task. + +[#add-task-selector] +== Start a new task selector with `+` + +On the Mill CLI you can also start a completely new task selector with the `+` sign. + +There is a subtle difference between the expansion of <>, <> and <> in contrast to the <>. + +For all the former versions, Mill parses them into a complex but single task selector path and subsequent parameters are used for all resolved tasks. + +Whereas the `+` starts a completely new selector path to which you can also provide a different parameter list. This is important when using xref:fundamentals/tasks.adoc#_commands[command tasks] which can accept their own parameters. The `JavaModule.run` command is an example. + +---- +> mill foo.run hello # <1> +> mill {foo,bar}.run hello # <2> +> mill __:JavaModule:^TestModule.run hello # <3> +> mill foo.run hello + bar.run world # <4> +---- + +<1> Runs `foo.run` with the parameter `hello` +<2> Expands to `foo.run` and `bar.run` and runs both with the parameter `hello`. +<3> Selects the `run` command of all Java modules, but not test modules, and runs them with the parameter `hello`. +<4> Runs `foo.run` with the parameter `hello` and `bar.run` with the parameter `world`.
+ diff --git a/docs/modules/ROOT/pages/comparisons/gradle.adoc b/docs/modules/ROOT/pages/comparisons/gradle.adoc new file mode 100644 index 00000000000..e65794657d4 --- /dev/null +++ b/docs/modules/ROOT/pages/comparisons/gradle.adoc @@ -0,0 +1,437 @@ += Case Study: Mill vs Gradle +:page-aliases: Case_Study_Mill_vs_Gradle.adoc + +include::partial$gtag-config.adoc[] + +Compared to Gradle, + + +* **Mill follows Gradle's conciseness and extensibility**: Rather than pages and pages of verbose XML, every +line in a Mill build is meaningful. e.g. adding a dependency is 1 line in +Mill, like it is in Gradle, and unlike the 5 line `` declaration you find in Maven. +Like Gradle, end users can easily customize their build to fit their exact needs without +needing to go through the process of writing plugins. + +* **Mill can be xref:comparisons/gradle.adoc#_performance[2-4x faster than Gradle]**: +Although both Mill and Gradle automatically cache and parallelize your build, Mill +does so with much less fixed overhead. This means less time waiting for your build +tool, and more time for the things that really matter to your project. + +* **Mill enforces best practices by default**. +xref:depth/execution-model.adoc#_caching_in_mill[All parts of a Mill build are cached and incremental by default]. +All Mill tasks write their output to xref:fundamentals/out-dir.adoc[a standard place]. +All task inter-dependencies are automatically captured without manual annotation. Where Gradle requires +considerable effort and expertise to understand your build and set it up in the right way, Mill's +xref:comparisons/gradle.adoc#_ide_experience[good IDE experience] makes understanding +your build easier, and its xref:comparisons/gradle.adoc#_extensibility[extensibility model] +makes configuring your build foolproof, so the easiest thing to do in Mill is usually the +right thing to do. 
+ +This page compares using Mill to Gradle, using the https://github.com/mockito/mockito[Mockito Testing Library] +codebase as the example. Mockito is a medium sized codebase, 100,000 lines of Java split over 22 +subprojects. By porting it to Mill, this case study should give you +an idea of how Mill compares to Gradle in more realistic, real-world projects. + +To do this, we have written a Mill `build.mill` file for the Mockito project. This can be used +with Mill to build and test the various submodules of the Mockito project without needing to +change any other files in the repository: + +- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/mockito/build.mill[Mockito build.mill file] + +== Completeness + +The Mill build for Mockito is not 100% complete, but it covers most of the major parts of Mockito: +compiling Java, running JUnit tests. For now, the Android, Kotlin, and OSGI tests are skipped, +as support for xref:android/java.adoc[Building Android apps in Mill] +and xref:kotlinlib/intro.adoc[Kotlin with Mill] is still experimental. + +The goal of this exercise is not to be 100% feature complete enough to replace the Gradle build +today. It is instead meant to provide a realistic comparison of how using Mill in a realistic, +real-world project compares to using Gradle. + +== Performance + +The Mill build for Mockito is generally snappier than the Gradle build. This applies to +most workflows, but the difference matters most for workflows which are short-lived, +where the difference in the fixed overhead of the build tool is most noticeable. + +For comparison purposes, I disabled the Gradle subprojects that we did not fully implement in Mill +(`groovyTest`, `groovyInlineTest`, `kotlinTest`, `kotlinReleaseCoroutinesTest`, `android`, +`osgi-test`, `java21-test`), and added the necessary flags to ensure caching/parallelism/etc. is +configured similarly for both tools. 
This ensures the comparison is fair with both builds compiling the +same code and running the same tests in the same way. + +For the benchmarks below, each provided number is the median wall time of three consecutive runs +on my M1 Macbook Pro. While ad-hoc, these benchmarks are enough to give you a flavor of how +Mill's performance compares to Gradle: + +[cols="1,1,1,1"] +|=== +| Benchmark | Gradle | Mill | Speedup + +| <<Sequential Clean Compile All>> | 17.6s | 5.40s | 3.3x +| <<Parallel Clean Compile All>> | 12.3s | 3.57s | 3.4x +| <<Clean Compile Single-Module>> | 4.41s | 1.20s | 3.7x +| <<Incremental Compile Single-Module>> | 1.37s | 0.51s | 2.7x +| <<No-Op Compile Single-Module>> | 0.94s | 0.46s | 2.0x +|=== + +The column on the right shows the speedups of how much faster Mill is compared to the +equivalent Gradle workflow. In most cases, Mill is 2-4x faster than Gradle. Below, we +will go into more detail of each benchmark: how they were run, what they mean, and how +we can explain the difference in performing the same task with the two different build tools. + +=== Sequential Clean Compile All + +```bash +$ ./gradlew clean; time ./gradlew classes testClasses --no-build-cache +17.6s +18.2s +17.4s + +$ ./mill clean; time ./mill -j 1 __.compile +5.60s +5.40s +6.13s +``` + +This benchmark measures the time taken to sequentially compile all the Java code in +the Mockito code base. The goal of this benchmark is to try and measure the "clean compile +everything" step, without the effects of parallelism that can be nondeterministic and vary +wildly from machine to machine depending on the number of cores available. + +To limit our comparison to compiling Java code only, we avoid +using `build` in favor of `classes` and `testClasses`: this skips running tests, +lint, jar/docjar generation, and other steps that `build` performs to make it an apples-to-apples +comparison. Furthermore, Gradle parallelizes the build by default and caches things globally +under `~/.gradle/caches`, while Mill parallelizes by default but does not cache things globally.
+Again to make it a fair comparison, we use `--no-build-cache` in Gradle and set +`org.gradle.parallel=false` in `gradle.properties`, and pass `-j 1` to limit Mill to a +single thread. + +Here we see Mill being about ~3.3x faster than Gradle, to do the equivalent amount of work. +As a point of reference, Java typically compiles at 10,000-50,000 lines per second on a +single thread, and the Mockito codebase is ~100,000 lines of code, so we would expect compile +to take 2-10 seconds without parallelism. +The 5-6s taken by Mill seems about what you would expect for a codebase of this size, +and the ~17s taken by Gradle is much more than what you would expect from simple Java compilation. + +It's actually not clear to me where the difference in execution time is coming from. Unlike +the xref:comparisons/maven.adoc[Mill v.s. Maven comparison], Gradle's command line output +doesn't show any obvious network requests or jar packing/unpacking/comparing going on. But +Gradle's CLI output is also much less verbose than Maven's, so it's possible things are going +on under the hood that I'm not aware of. + +=== Parallel Clean Compile All + +```bash +$ ./gradlew clean; time ./gradlew classes testClasses --no-build-cache +13.8s +12.3s +11.4s + +$ ./mill clean; time ./mill __.compile +3.59s +3.57s +3.45s +``` + +This benchmark is identical to the <<Sequential Clean Compile All>> benchmark above, but enables +parallelism: Gradle by default, Mill without `-j 1` to run on 10 cores (the number on my Macbook Pro). + +Neither Gradle nor Mill benefit hugely from parallelism: both show a moderate ~50% speedup, +despite receiving 900% more CPUs. This likely indicates that the module dependency graph +of the Mockito codebase is laid out in a way that does not allow huge amounts of compile-time +parallelism. + +Again, we see Mill being about ~3.4x faster than Gradle, to do the equivalent amount of work. +This indicates that the speedup Mill provides over Gradle is unrelated to the parallelism of +each tool.
+ +=== Clean Compile Single-Module + +```bash +$ ./gradlew clean; time ./gradlew :classes --no-build-cache +4.14s +4.41s +4.41s + +$ ./mill clean; time ./mill compile +1.20s +1.12s +1.30s +``` + +This benchmark illustrates the use case of clean-compiling a single module. In this case, +the root module in `src/main/java/` containing the bulk of the Mockito library code, +_excluding_ the test code in `src/test/java/` and all the downstream subprojects in +`subprojects/`. + +This benchmark gives us Mill being about ~3.7x faster than Gradle. This is in line with +the results above. + +=== Incremental Compile Single-Module + +```bash +$ echo "" >> src/main/java/org/mockito/BDDMockito.java; time ./gradlew :classes +1.37s +1.39s +1.28s + +$ echo "" >> src/main/java/org/mockito/BDDMockito.java; time ./mill compile +compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... +0.52s +0.51s +0.52s +``` + +This benchmark measures the common case of making a tiny change to a single file and +re-compiling just that module. This is the common workflow that most software developers +do over and over day-in and day-out. We simulate this by appending a new line to the +file `src/main/java/org/mockito/BDDMockito.java`. + +Both Mill and Gradle are able to take advantage of the small code change and re-compile +only the single files needing re-compilation, demonstrating substantial speedups over +the <<Clean Compile Single-Module>> benchmark above. Mill remains faster than Gradle, +showing a ~2.7x speedup for this task. + +=== No-Op Compile Single-Module + +```bash +$ time ./gradlew :classes +0.95s +0.93s +0.94s + +$ time ./mill common.compile +0.46s +0.50s +0.45s +``` + +This benchmark is meant to measure the pure overhead of running the build tool: given a single +module that did _not_ change, the build tool should need to do _nothing_ in response, and so +any time taken is pure overhead.
+ +For both Mill and Gradle, we see small speedups relative to the <<Incremental Compile Single-Module>> +benchmark above, which likely comes from not having to compile any Java source files at all. Mill +remains faster than Gradle by about 2.0x. + +== IDE Experience + +One area that Mill does better than Gradle is providing a seamless IDE experience. For example, +consider the snippet below where we are using Gradle to configure the javac compiler options. +Due to `.gradle` files being untyped Groovy, the autocomplete and code-assist experience working +with these files is hit-or-miss. In the example below, we can see that IntelliJ is able to identify +that `compileArgs` exists and has the type `List<String>`: + +image::comparisons/IntellijMockitoGradleCompileOptions.png[] + +But if you try to jump to definition or find out anything else about it you hit a wall: + +image::comparisons/IntellijMockitoGradleCompileOptions2.png[] + +Often working with build configurations feels like hitting dead ends: if you don't have +`options.compilerArgs` memorized in your head, there is literally nothing you can do in your editor to +make progress to figure out what it is or what it is used for. That leaves you googling +for answers, which can be a frustrating experience that distracts you from the task at hand. + +The fundamental problem with tools like Gradle is that the code you write does not +actually perform the build: rather, you are just setting up some data structure that +is used to configure the _real_ build engine that runs later. Thus when you explore +the Gradle build in an IDE, the IDE can only explore the configuration logic (which +is usually un-interesting) and is unable to explore the actual build logic (which +is what you actually care about!)
+ +In comparison, Mill's `.mill` files are all statically typed, and as a result IntelliJ is easily able to +pull up the documentation for `def javacOptions`, even though it doesn't have any special support +for Mill built into the IDE: + +image::comparisons/IntellijMockitoMillJavacOptionsDocs.png[] + +Apart from static typing, the way Mill builds are structured also helps the IDE: Mill +code _actually performs your build_, rather than configuring some opaque build engine. +While that sounds academic, one concrete consequence is that IntelliJ is able to take +your `def javacOptions` override and +find the original definitions that were overridden, and show you where they are defined: + +image::comparisons/IntellijMockitoMillJavacOptionsParents.png[] + +image::comparisons/IntellijMockitoMillJavacOptionsDef.png[] + +Furthermore, because task dependencies in Mill are just normal method calls, IntelliJ is +able to _find usages_, showing you where the task is used. Below, we can see the method +call in the `def compile` task, which uses `javacOptions()` along with a number of other tasks: + +image::comparisons/IntellijMockitoMillCompile.png[] + +From there, if you are curious about any of the other tasks used alongside `javacOptions`, it's +easy for you to pull up _their_ documentation, jump to _their_ +definition, or find _their_ usages. For example we can pull up the docs of +`compileClasspath()` below: + +image::comparisons/IntellijMockitoMillCompileClasspath.png[] + +Or we can use _find usages_ on `def compile` to see where it is used, both in this build +and upstream in the Mill libraries: + +image::comparisons/IntellijMockitoMillCompileUsages.png[] + +Unlike most other build tools, Mill builds are extremely easy to explore interactively in your +IDE. If you do not know what something does, it's documentation, definition, or usages is always +one click away in IntelliJ or VSCode. 
That's not to say Mill builds aren't complex - as +we saw above, compilation has to deal with upstream outputs, classpaths, flags, reporters, and so on - +but at least in Mill your IDE can help you explore, understand and manage the complexity in a way +that no other build tool supports. + +Note that the IDE experience that Mill provides should already be very familiar to anyone writing +Java, Kotlin, or Scala: + +* _of course_ you can find the overridden definitions! +* _of course_ you can pull up the documentation in a click! +* _of course_ you can navigate around the codebase with your IDE, up and down + the call graph, to see who calls who! + +What Mill provides isn't rocket science, but rather it is just about taking your existing experience +and existing IDE tooling working with application codebases, and lets you use it to manage your build +system as well. + +Mill IDE support isn't perfect - you may have noticed the spurious red squigglies above - but it's +already better than most other build systems like Gradle or Maven. And that is with approximately +~zero custom integrations with the various IDEs: with some additional work, we can expect the Mill +IDE experience to improve even more over time. + +== Extensibility + +Another facet of Mill that is worth exploring is the ease of making custom tasks or build steps.
+For example, in Mill, overriding the resources to duplicate a file can be done as follows: + +```scala +def resources = Task { + os.copy( + compile().classes.path / "org/mockito/internal/creation/bytebuddy/inject/MockMethodDispatcher.class", + Task.dest / "org/mockito/internal/creation/bytebuddy/inject/MockMethodDispatcher.raw", + createFolders = true + ) + super.resources() ++ Seq(PathRef(Task.dest)) +} +``` + +In Gradle, it is written as: + +```scala +tasks.register('copyMockMethodDispatcher', Copy) { + dependsOn compileJava + + from "${sourceSets.main.java.classesDirectory.get()}/org/mockito/internal/creation/bytebuddy/inject/MockMethodDispatcher.class" + into layout.buildDirectory.dir("generated/resources/inline/org/mockito/internal/creation/bytebuddy/inject") + + rename '(.+)\\.class', '$1.raw' +} + +classes.dependsOn("copyMockMethodDispatcher") + +sourceSets.main { + resources { + output.dir(layout.buildDirectory.dir("generated/resources/inline")) + } +} +``` + +At a first glance, both of these snippets do the same thing, just with different syntaxes +and helper method names. However, on a deeper look, a few things are worth noting: + +1. In Mill, you do not need to manually add `dependsOn` clauses, unlike Gradle: + * In Mill, referencing the value of `compile()`, we both explicitly get access to the value of `compile` + and also add a dependency on it. In Gradle, you need to separately add `dependsOn compile` + to mark the dependency, and `rename '(.+)\\.class', '$1.raw'` to make use of it implicitly. + + * In Mill, overriding `def resources` is enough to make all tasks that previously depended on `resources` + now depend on the override (e.g. `run`, `test`, `jar`, `assembly`, etc.) as is the norm for object-oriented + ``override``s. 
In Gradle, you need to explicitly call `classes.dependsOn("copyMockMethodDispatcher")` to + make the downstream `classes` task depend on `copyMockMethodDispatcher`, and + `sourceSets.main resources output.dir` to wire up the generated files to the resources of the module + +2. In Mill, the `resources` task is given a `Task.dest` folder that is unique to it. + In contrast, Gradle's `copyMockMethodDispatcher` puts things in a global `generated/` folder + * This means that in Mill, you do not need to worry about filesystem collisions, since every + task's `Task.dest` is unique. In contrast, in Gradle you need to make sure that no other + task in the entire build is scribbling over `generated/`, otherwise they could interfere + with one another in confusing ways + + * This also means that in Mill, you always know where the output of a particular task + is - `foo.bar.resources` writes to `out/foo/bar/resources.dest/` - so you can always easily + find the output of a particular task. In Gradle, you have to dig through the source code to + find where the task is implemented and see where it is writing to. + +3. Mill passes typed structured ``Path``s and ``PathRef``s between each other, while Gradle often + uses raw path strings + * In Mill, `def resources` returns a `PathRef(Task.dest)` for downstream tasks to use, + so downstream tasks can use it directly (similar to how it makes use of + `compile().classes.path` directly). This means different tasks can refer to each other + in a foolproof way without room for error + + * In Gradle, `sourceSets.main resources output.dir` needs to refer to the path generated by + `copyMockMethodDispatcher` via its string `"generated/resources/inline"`. That adds a lot of + room for error, since the strings can easily get out of sync accidentally.
+ +In general, although the two snippets aren't that different superficially, Mill makes it +easy to do the right thing by default: + +* Upstream task dependencies are recorded automatically when used +* Overridden definitions are automatically used by downstream tasks +* Every task is automatically assigned a place on disk so you don't need + to worry about collisions and can easily find outputs +* Tasks interact with each other via typed structured values - ``Path``s, ``PathRef``s, + etc. - rather than magic strings + +Although in Gradle it is possible for an expert to customize their build in a +way that mitigates these issues, Mill does it automatically and in a way that +is foolproof even for non-experts. This helps democratize the build so that +any engineer can contribute fixes or improvements without needing to be a +build-system expert and learn all the best practices first. + +Lastly, as mentioned earlier, the Gradle script has limited IDE support: it can +autocomplete things for you, but once you try to jump-to-definition or otherwise +navigate your build you hit a wall: it tells you some minimal documentation about +the identifier, but nothing about how it is implemented or where it is used: + +image::comparisons/IntellijGradleResourcesClasses.png[] +image::comparisons/IntellijGradleResourcesClassesDefinition.png[] + +In contrast, IntelliJ is able to navigate straight to the definition of `compile()` in the +Mill build (as we saw earlier in <<_ide_experience>>), and from there can continue to +traverse the build via _jump to definition_ or _find usages_, +as we saw earlier: + +image::comparisons/IntellijMockitoMillCompile.png[] +image::comparisons/IntellijMockitoMillCompileClasspath.png[] +image::comparisons/IntellijMockitoMillCompileUsages.png[] + +Mill build scripts are written in Scala, but you do not need to be an expert in Scala +to use Mill, just like you do not need to be an expert in Groovy to use Gradle.
Because +Mill has great IDE support, and does the right things by default, I hope it would be +much easier for a non-expert to contribute to a Mill build than it would be for a +non-expert to contribute to Gradle. + +== Conclusion + + +Both the Mill and Gradle builds we discussed in this case study do the same thing: they +compile Java code and run tests. Sometimes they perform additional configuration, tweaking +JVM arguments or doing ad-hoc classpath mangling. + +In general, building projects with Mill is significantly faster than Gradle, but the gap +is not as big as when comparing xref:comparisons/maven.adoc[Mill v.s. Maven]. Mill builds +do all the same things as Gradle builds, and need to manage the same kind of complexity. +But where Mill shines over Gradle is just the understandability of the build: while Gradle is +https://news.ycombinator.com/item?id=25801986[famously confusing and opaque], Mill's great +IDE support allows the user to explore and understand their build as easily as any +application codebase, and its fool-proof approach to extensibility means non-experts can +confidently modify or add to their build system without worrying about getting it wrong. + +Again, the Mill build used in this comparison is for demonstration purposes, and more +work would be necessary to make the Mill build production ready: publishing configuration, +code coverage integration, and so on. Furthermore, Mill is definitely not perfect, +and it is a work in progress to improve the user experience and iron out bugs. However, +hopefully this comparison demonstrates the potential value, and convinces you to give it a try!
diff --git a/docs/modules/ROOT/pages/comparisons/maven.adoc b/docs/modules/ROOT/pages/comparisons/maven.adoc new file mode 100644 index 00000000000..c583a5c1b4e --- /dev/null +++ b/docs/modules/ROOT/pages/comparisons/maven.adoc @@ -0,0 +1,655 @@ += Case Study: Mill vs Maven +:page-aliases: Case_Study_Mill_vs_Maven.adoc + +include::partial$gtag-config.adoc[] + +Compared to Maven: + + +* **Mill follows Maven's innovation of good built-in defaults**: Mill's built-in +``JavaModule``s follow Maven's "convention over configuration" style, so small Mill +projects require minimal effort to get started, and larger Mill projects have a consistent +structure building on these defaults. + +* **Mill automatically caches and parallelizes your build, offering 4-10x speedups**: +Not just the built-in tasks that Mill ships with, but also any custom tasks or modules. +This maximizes the snappiness of your command-line build workflows to keep you productive, +which especially matters in larger codebases where builds tend to get slow: +xref:comparisons/maven.adoc#_performance[a Maven "clean install" workflow + taking over a minute might take just a few seconds in Mill]. + +* **Mill makes customizing the build tool much easier than Maven**. Projects usually +grow beyond just compiling a single language: needing custom +code generation, linting workflows, tool integrations, output artifacts, or support for +additional languages. Mill's xref:comparisons/maven.adoc#_extensibility_ide_experience[extensibility and IDE experience] +makes doing this yourself easy and safe, with type-checked code and +xref:depth/sandboxing.adoc[sandboxed tasks] + + +This page compares using Mill to Maven, using the https://github.com/netty/netty[Netty Network Server] +codebase as the example. Netty is a large, old codebase. 500,000 lines of Java, written by +over 100 contributors across 15 years, split over 47 subprojects, with over 10,000 lines of +Maven `pom.xml` configuration alone. 
By porting it to Mill, this case study should give you +an idea of how Mill compares to Maven in larger, real-world projects. + +To do this, we have written a Mill `build.mill` file for the Netty project. This can be used +with Mill to build and test the various submodules of the Netty project without needing to +change any other files in the repository: + +- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/netty/build.mill[Netty build.mill file] + +== Completeness + +The Mill build for Netty is not 100% complete, but it covers most of the major parts of Netty: +compiling Java, compiling and linking C code via JNI, running JUnit tests and some integration +tests using H2Spec. All 47 Maven subprojects are modelled using Mill, with the entire Netty codebase +being approximately 500,000 lines of code. + +```bash +$ git ls-files | grep \\.java | xargs wc -l +... +513805 total +``` + +The goal of this exercise is not to be 100% feature complete enough to replace the Maven build +today. It is instead meant to provide a realistic comparison of how using Mill in a large, +complex project compares to using Maven. + +Both Mill and Maven builds end up compiling the same set of files, although the number being +reported by the command line is slightly higher for Mill (2915 files) than Maven (2822) due +to differences in the reporting (e.g. Maven does not report `package-info.java` files as part +of the compiled file count). + +== Performance + +The Mill build for Netty is much more performant than the default Maven build. This applies to +most workflows. + +For the benchmarks below, each provided number is the wall time of three consecutive runs +on my M1 Macbook Pro. 
While ad-hoc, these benchmarks are enough to give you a flavor of how +Mill's performance compares to Maven: + +[cols="1,1,1,1"] +|=== +| Benchmark | Maven | Mill | Speedup + + +| <<Sequential Clean Compile All>> | 98.80s | 23.14s | 4.3x +| <<Parallel Clean Compile All>> | 48.92s | 8.79s | 5.6x +| <<Clean Compile Single-Module>> | 4.89s | 1.11s | 4.4x +| <<Incremental Compile Single-Module>> | 6.82s | 0.54s | 12.6x +| <<No-Op Compile Single-Module>> | 5.25s | 0.47s | 11.2x +|=== + +The column on the right shows the speedups of how much faster Mill is compared to the +equivalent Maven workflow. In most cases, Mill is 4-10x faster than Maven. Below, we +will go into more detail of each benchmark: how they were run, what they mean, and how +we can explain the difference in performing the same task with the two different build tools. + +=== Sequential Clean Compile All + +```bash +$ ./mvnw clean; time ./mvnw -Pfast -Dcheckstyle.skip -Denforcer.skip=true -DskipTests install +98.80s +96.14s +99.95s + +$ ./mill clean; time ./mill -j1 __.compile +23.99s +23.14s +22.68s +``` + +This benchmark exercises the simple "build everything from scratch" workflow, with all remote +artifacts already in the local cache. The actual files +being compiled are the same in either case (as mentioned in the <<Completeness>> section). +I have explicitly disabled the various linters and tests for the Maven build, to just focus +on the compilation of Java source code making it an apples-to-apples comparison. As Mill +runs tasks in parallel by default, I have disabled parallelism explicitly via `-j1`. + +As a point of reference, Java typically compiles at 10,000-50,000 lines per second on a +single thread, and the Netty codebase is ~500,000 lines of code, so we would expect compile +to take 10-50 seconds without parallelism. +The 20-30s taken by Mill seems about what you would expect for a codebase of this size, +and the ~100s taken by Maven is far beyond what you would expect from simple Java compilation.
+ +==== Maven Compile vs Install + +In general, the reason we have to use `./mvnw install` rather than `./mvnw compile` is that +Maven's main mechanism for managing inter-module dependencies is via the local artifact cache +at `~/.m2/repository`. Although many workflows work with `compile`, some don't, and +`./mvnw clean compile` on the Netty repository fails with: + +```text +[ERROR] Failed to execute goal org.apache.maven.plugins:maven-dependency-plugin:2.10:unpack-dependencies +(unpack) on project netty-resolver-dns-native-macos: Artifact has not been packaged yet. +When used on reactor artifact, unpack should be executed after packaging: see MDEP-98. -> [Help 1] +[ERROR] +[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch. +[ERROR] Re-run Maven using the -X switch to enable full debug logging. +[ERROR] +[ERROR] For more information about the errors and possible solutions, please read the following articles: +[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException +[ERROR] +[ERROR] After correcting the problems, you can resume the build with the command +[ERROR] mvn -rf :netty-resolver-dns-native-macos +``` + +In contrast, Mill builds do not rely on the local artifact cache, even though Mill is able +to publish to it. That means Mill builds are able to work directly with classfiles on disk, +simply referencing them and using them as-is without spending time packing and unpacking them +into `.jar` files. 
+ Furthermore, even if we _did_ want Mill to generate the ``.jar``s, the +overhead of doing so is just a few seconds, far less than the two entire minutes that +Maven's overhead adds to the clean build: + +```bash +$ ./mill clean; time ./mill -j1 __.jar +32.58s +24.90s +23.35s +``` + +From this benchmark, we can see that although both Mill and Maven are doing the same work, +Mill takes about as long as it _should_ for this task of compiling 500,000 lines of Java source +code, while Maven takes considerably longer. And much of this overhead comes from Maven +doing unnecessary work packing/unpacking jar files and publishing to a local repository, +whereas Mill directly uses the classfiles generated on disk to bypass all that work. + +=== Parallel Clean Compile All + +```bash +$ ./mvnw clean; time ./mvnw -T 10 -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install +48.92s +48.41s +49.50s + +$ ./mill clean; time ./mill __.compile +9.07s +8.79s +7.93s +``` + +This example compares Maven v.s. Mill, when performing the clean build on 10 threads. +Both build tools support parallelism (`-T 10` in Maven, by default in Mill), and both +tools see a similar ~2x speedup for building the Netty project using 10 threads. Again, +this tests a clean build using `./mvnw clean` or `./mill clean`. + +This comparison shows that much of Mill's speedup over Maven is unrelated to parallelism. +Whether sequential or parallel, Mill has approximately the same ~4-6x speedup over Maven +when performing a clean build of the Netty repository. + +=== Clean Compile Single-Module + +```bash +$ ./mvnw clean; time ./mvnw -pl common -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true -Dmaven.test.skip=true install +4.85s +4.96s +4.89s + +$ ./mill clean common; time ./mill common.compile +1.10s +1.12s +1.11s +``` + +This exercise limits the comparison to compiling a single module, in this case `common/`, +ignoring test sources. + +Again, we can see a significant speedup of Mill v.s.
Maven remains even when compiling a +single module: a clean compile of `common/` is about 4x faster with Mill than with Maven! +Again, `common/` is about 30,000 lines of Java source code, so at 10,000-50,000 lines per +second we would expect it to compile in about 1-4s. That puts Mill's compile times right +at what you would expect, whereas Maven's has a significant overhead. + +=== Incremental Compile Single-Module + +```bash +$ echo "" >> common/src/main/java/io/netty/util/AbstractConstant.java +$ time ./mvnw -pl common -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install +Compiling 174 source files to /Users/lihaoyi/Github/netty/common/target/classes +Compiling 60 source files to /Users/lihaoyi/Github/netty/common/target/test-classes + +6.89s +6.34s +6.82s + + +$ echo "" >> common/src/main/java/io/netty/util/AbstractConstant.java +$ time ./mill common.test.compile +compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... + +0.78s +0.54s +0.51s +``` + +This benchmark explores editing a single file and re-compiling `common/`. + +Maven by default takes about as long to re-compile ``common/``s `main/` and `test/` sources +after a single-line edit as it does from scratch, about 5-7 seconds. However, Mill +takes just about 0.5s to compile and be done! Looking at the logs, we can see it is +because Mill only compiles the single file we changed, and not the others. + +For this incremental compilation, Mill uses the +https://github.com/sbt/zinc[Zinc Incremental Compiler]. Zinc is able to analyze the dependencies +between files to figure out what needs to re-compile: for an internal change that doesn't +affect downstream compilation (e.g.
changing a string literal) Zinc only needs to compile +the file that changed, taking barely half a second: + +```diff +$ git diff +diff --git a/common/src/main/java/io/netty/util/AbstractConstant.java b/common/src/main/java/io/netty/util/AbstractConstant.java +index de16653cee..9818f6b3ce 100644 +--- a/common/src/main/java/io/netty/util/AbstractConstant.java ++++ b/common/src/main/java/io/netty/util/AbstractConstant.java +@@ -83,7 +83,7 @@ public abstract class AbstractConstant> implements + return 1; + } + +- throw new Error("failed to compare two different constants"); ++ throw new Error("failed to compare two different CONSTANTS!!"); + } + + } +``` +```bash +$ time ./mill common.test.compile +[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... +0m 00.55s6 +``` + +In contrast, a change to a class or function public signature (e.g. adding a method) may +require downstream code to re-compile, and we can see that below: + +```diff +$ git diff +diff --git a/common/src/main/java/io/netty/util/AbstractConstant.java b/common/src/main/java/io/netty/util/AbstractConstant.java +index de16653cee..f5f5a93e0d 100644 +--- a/common/src/main/java/io/netty/util/AbstractConstant.java ++++ b/common/src/main/java/io/netty/util/AbstractConstant.java +@@ -41,6 +41,10 @@ public abstract class AbstractConstant> implements + return name; + } + ++ public final String name2() { ++ return name; ++ } ++ + @Override + public final int id() { + return id; +``` +```bash +$ time ./mill common.test.compile +[25/48] common.compile +[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... +[info] compiling 2 Java sources to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... +[info] compiling 4 Java sources to /Users/lihaoyi/Github/netty/out/common/compile.dest/classes ... 
+[info] compiling 3 Java sources to /Users/lihaoyi/Github/netty/out/common/test/compile.super/mill/scalalib/JavaModule/compile.dest/classes ... +[info] compiling 1 Java source to /Users/lihaoyi/Github/netty/out/common/test/compile.super/mill/scalalib/JavaModule/compile.dest/classes ... +0m 00.81s2 +``` + +Here, we can see that Zinc ended up re-compiling 7 files in `common/src/main/` and 3 files +in `common/src/test/` as a result of adding a method to `AbstractConstant.java`. + +In general, Zinc is conservative, and does not always end up selecting the minimal set of +files that need re-compiling: e.g. in the above example, the new method `name2` does not +interfere with any existing method, and the ~9 downstream files did not actually need to +be re-compiled! However, even conservatively re-compiling 9 files is much faster than +Maven blindly re-compiling all 234 files, and as a result the iteration loop of +editing-compiling-testing your Java projects in Mill can be much faster than doing +the same thing in Maven + +=== No-Op Compile Single-Module + +```bash +$ time ./mvnw -pl common -Pfast -DskipTests -Dcheckstyle.skip -Denforcer.skip=true install +5.08s +5.25s +5.26s + +$ time ./mill common.test.compile +0.49s +0.47s +0.45s +``` + +This last benchmark explores the boundaries of Maven and Mill: what happens if +we ask to compile a single module _that has already been compiled_? In this case, +there is literally _nothing to do_. For Maven, "doing nothing" takes ~17 seconds, +whereas for Mill we can see it complete and return in less than 0.5 seconds + +Grepping the logs, we can confirm that both build tools skip re-compilation of the +`common/` source code. In Maven, skipping compilation only saves us ~2 seconds, +bringing down the 19s we saw in <> to 17s here. This +matches what we expect about Java compilation speed, with the 2s savings on +40,000 lines of code telling us Java compiles at ~20,000 lines per second. 
However, +we still see Maven taking *17 entire seconds* before it can decide to do nothing! + +In contrast, doing the same no-op compile using Mill, we see the timing from 2.2s +in <> to 0.5 seconds here. This is the same ~2s reduction +we saw with Maven, but due to Mill's minimal overhead, in the end the command +finishes in less than half a second. + + +== Extensibility & IDE Experience + +Even though Maven is designed to be declarative, in many real-world codebases you end +up needing to run ad-hoc scripts and logic. This section will explore two such scenarios, +so you can see how Mill differs from Maven in the handling of these requirements. + +=== JVM Libraries: Groovy + +The Maven build for the `common/` subproject +uses a Groovy script for code generation. This is configured via: + +```xml + + ${project.basedir}/src/main/templates + ${project.basedir}/src/test/templates + ${project.build.directory}/generated-sources/collections/java + ${project.build.directory}/generated-test-sources/collections/java + + + org.codehaus.gmaven + groovy-maven-plugin + 2.1.1 + + + org.codehaus.groovy + groovy + 3.0.9 + + + ant + ant-optional + 1.5.3-1 + + + + + generate-collections + generate-sources + + execute + + + ${project.basedir}/src/main/script/codegen.groovy + + + + +``` + +In contrast, the Mill build configures the code generation as follows: + +```scala +import $ivy.`org.codehaus.groovy:groovy:3.0.9` +import $ivy.`org.codehaus.groovy:groovy-ant:3.0.9` +import $ivy.`ant:ant-optional:1.5.3-1` + +object common extends NettyModule{ + ... 
+ def script = Task.Source(millSourcePath / "src" / "main" / "script") + def generatedSources0 = Task { + val shell = new groovy.lang.GroovyShell() + val context = new java.util.HashMap[String, Object] + + context.put("collection.template.dir", "common/src/main/templates") + context.put("collection.template.test.dir", "common/src/test/templates") + context.put("collection.src.dir", (T.dest / "src").toString) + context.put("collection.testsrc.dir", (T.dest / "testsrc").toString) + + shell.setProperty("properties", context) + shell.setProperty("ant", new groovy.ant.AntBuilder()) + + shell.evaluate((script().path / "codegen.groovy").toIO) + + (PathRef(T.dest / "src"), PathRef(T.dest / "testsrc")) + } + + def generatedSources = Task { Seq(generatedSources0()._1)} +} +``` + +While the number of lines of code _written_ is not that different, the Mill configuration +is a lot more direct: rather than writing 35 lines of XML to configure an opaque third-party +plugin, we instead write 25 lines of code to directly do what we want: import `groovy`, +configure a `GroovyShell`, and use it to evaluate our `codegen.groovy` script. Although +you may not be familiar with the Scala language that Mill builds are written in, you could +probably skim the snippet above and guess what it is doing, and guess correctly. + +This direct control means you are not beholden to third party plugins: rather than being +limited to what an existing plugin _allows_ you to do, Mill allows you to directly write +the code necessary to do what _you need to do_. 
In this case, if we need to invoke +https://github.com/apache/groovy[Groovy] and +https://github.com/groovy/groovy-core/blob/4c05980922a927b32691e4c3eba5633825cc01e3/subprojects/groovy-ant/src/spec/doc/groovy-ant-task.adoc[Groovy-Ant], +Mill allows us to direct xref:extending/import-ivy-plugins.adoc[import $ivy] the relevant +JVM artifacts from Maven Central and begin using them in our build code in a safe, +strongly-typed fashion, with full autocomplete and code assistance: + +image::comparisons/IntellijNettyAutocomplete.png[] + +Mill gives you the full power of the JVM ecosystem to use in your build: any Java library +on Maven central is just an `import $ivy` away, and can be used with the full IDE support +and tooling experience you are used to in the JVM ecosystem. + +=== Subprocesses: Make + +The Maven build for the `transport-native-unix-common/` subproject needs to call +`make` in order to compile its C code to modules that can be loaded into Java applications +via JNI. Maven does this via the `maven-dependency-plugin` and `maven-antrun-plugin` which are +approximately configured as below: + +```xml + + make + gcc + ar + libnetty-unix-common + ${project.basedir}/src/main/c + ${project.build.directory}/netty-jni-util/ + ${project.build.directory}/native-jar-work + ${project.build.directory}/native-objs-only + ${project.build.directory}/native-lib-only + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + + unpack + generate-sources + + unpack-dependencies + + + io.netty + netty-jni-util + sources + ${jniUtilIncludeDir} + **.h,**.c + false + true + + + + + + maven-antrun-plugin + + + + build-native-lib + generate-sources + + run + + + + + + + + + + + + + + + + + + + + + +``` + +The `maven-dependency-plugin` is used to download and unpack a single `jar` file, +while `maven-antrun-plugin` is used to call `make`. Both are configured via XML, +with the `make` command essentially being a bash script wrapped in layers of XML. 
+ +In contrast, the Mill configuration for this logic is as follows: + +```scala +def makefile = Task.Source(millSourcePath / "Makefile") +def cSources = Task.Source(millSourcePath / "src" / "main" / "c") +def cHeaders = Task { + for(p <- os.walk(cSources().path) if p.ext == "h"){ + os.copy(p, T.dest / p.relativeTo(cSources().path), createFolders = true) + } + PathRef(T.dest) +} + +def make = Task { + os.copy(makefile().path, T.dest / "Makefile") + os.copy(cSources().path, T.dest / "src" / "main" / "c", createFolders = true) + + val Seq(sourceJar) = resolveDeps( + deps = Task.Anon(Agg(ivy"io.netty:netty-jni-util:0.0.9.Final").map(bindDependency())), + sources = true + )().toSeq + + os.proc("jar", "xf", sourceJar.path).call(cwd = T.dest / "src" / "main" / "c") + + os.proc("make").call( + cwd = T.dest, + env = Map( + "CC" -> "clang", + "AR" -> "ar", + "JNI_PLATFORM" -> "darwin", + "LIB_DIR" -> "lib-out", + "OBJ_DIR" -> "obj-out", + "MACOSX_DEPLOYMENT_TARGET" -> "10.9", + "CFLAGS" -> Seq( + "-mmacosx-version-min=10.9", "-O3", "-Werror", "-Wno-attributes", "-fPIC", + "-fno-omit-frame-pointer", "-Wunused-variable", "-fvisibility=hidden", + "-I" + sys.props("java.home") + "/include/", + "-I" + sys.props("java.home") + "/include/darwin", + "-I" + sys.props("java.home") + "/include/linux", + ).mkString(" "), + "LD_FLAGS" -> "-Wl,--no-as-needed -lrt -Wl,-platform_version,macos,10.9,10.9", + "LIB_NAME" -> "libnetty-unix-common" + ) + ) + + (PathRef(T.dest / "lib-out"), PathRef(T.dest / "obj-out")) +} +``` + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + makefile -> make + cSources -> make + cSources -> cHeaders +} +``` + +In Mill, we define the `makefile`, `cSources`, `cHeaders`, and `make` tasks. The bulk +of the logic is in `def make`, which prepares the `makefile` and C sources, +resolves the `netty-jni-util` source jar and unpacks it with `jar xf`, and calls `make` +with the given environment variables. 
Both `cHeaders` and the output of `make` are used
+in downstream modules. In this case, `make` is a command-line utility rather than a JVM
+library, so rather than importing it from Maven Central we use `os.proc.call` to invoke it.
+
+Again, the Maven XML and Mill code contain exactly the same logic, and neither is
+much more concise or verbose than the other. Rather, what is interesting is that
+it is much easier to work with this kind of _build logic_ via _concise type-checked code_,
+rather than configuring a bunch of third-party plugins to try and achieve what you want.
+With Mill, you get your full IDE experience working with your build: autocomplete, code
+assistance, navigation, and so on. Although working with the `os.proc.call` subprocess API
+is not as rich as working with the JVM libraries we saw earlier, it is still a much
+richer experience than you typically get configuring XML files:
+
+image::comparisons/IntellijNettyPeekDocs.png[]
+
+
+== Debugging Tooling
+
+Another area that Mill does better than Maven is providing builtin tools for you to understand
+what your build is doing. For example, the Netty project build discussed has 47 submodules
+and associated test suites, but how do these different modules depend on each other? With
+Mill, you can run `./mill visualize __.compile`, and it will show you how the
+`compile` task of each module depends on the others (right-click open-image-in-new-tab to see
+at full size):
+
+image::comparisons/NettyCompileGraph.svg[]
+
+Apart from the static dependency graph, another thing of interest may be the performance
+profile and timeline: where the time is spent when you actually compile everything.
With +Mill, when you run a compilation using `./mill __.compile`, you automatically get a +`out/mill-chrome-profile.json` file that you can load into your `chrome://tracing` page and +visualize where your build is spending time and where the performance bottlenecks are: + +image::comparisons/NettyCompileProfile.png[] + +If you want to inspect the tree of third-party dependencies used by any module, the +built in `ivyDepsTree` command lets you do that easily: + +```bash +$ ./mill handler.ivyDepsTree +├─ org.jctools:jctools-core:4.0.5 +├─ org.junit.jupiter:junit-jupiter-api:5.9.0 +│ ├─ org.apiguardian:apiguardian-api:1.1.2 +│ ├─ org.junit.platform:junit-platform-commons:1.9.0 +│ │ └─ org.apiguardian:apiguardian-api:1.1.2 +│ └─ org.opentest4j:opentest4j:1.2.0 +└─ com.google.protobuf:protobuf-java:2.6.1 +``` + +None of these tools are rocket science, but Mill provides all of them out of the +box in a convenient package for you to use. Whether you want a visual graph layout, +a parallel performance profile, or a third-party dependency tree of your project, +Mill makes it easy and convenient without needing to fiddle with custom configuration +or third party plugins. This helps make it easy for you to explore, understand, and +take ownership of the build tool. + +== Conclusion + +Both the Mill and Maven builds we discussed in this case study do the same thing: they +compile Java code, zip them into Jar files, run tests. Sometimes they compile and link +C code or run `make` or Groovy. + +Mill doesn't try to do _more_ than Maven does, but it +tries to do it _better_: faster compiles, shorter and easier to read configs, easier +extensibility via libraries (e.g. `org.codehaus.groovy:groovy`) and subprocesses +(e.g. `make`), better IDE support for working with your build. 
+ +Again, the Mill build used in this comparison is for demonstration purposes, and more +work would be necessary to make the Mill build production ready: compatibility with +different operating system architectures, Java versions, and so on. However, hopefully +it demonstrates the potential value: greatly improved performance, easy extensibility, +and a much better IDE experience for working with your build. +Mill provides builtin tools to help you navigate, +visualize, and understand your build, turning a normally opaque "build config" into +something that's transparent and easily understandable. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/comparisons/sbt.adoc b/docs/modules/ROOT/pages/comparisons/sbt.adoc new file mode 100644 index 00000000000..cc84733024c --- /dev/null +++ b/docs/modules/ROOT/pages/comparisons/sbt.adoc @@ -0,0 +1,316 @@ += Case Study: Mill vs SBT +:page-aliases: Case_Study_Mill_vs_SBT.adoc + +include::partial$gtag-config.adoc[] + +Compared to SBT, + +* **Mill makes customizing the build yourself much easier**: most of what build tools +do work with files and call subprocesses, and Mill makes doing that yourself easy. +This means you can always make your Mill build do exactly what you want, and are not +beholden to third-party plugins that may not meet your exact needs or interact well +with each other. + +* **Mill is much more performant**: SBT has enough overhead that even a dozen +subprojects is enough to slow it down, while Mill can handle hundreds of modules without issue. +Custom tasks in SBT re-execute every time, whereas in Mill they are cached automatically. +Mill's watch-for-changes-and-re-run implementation has much lower latency than SBT's. 
The
+list of ways Mill improves upon SBT's performance is long, and at the command line you
+can really feel it.
+
+* **Mill builds are much easier to understand**: Your Mill build is made of bog-standard
+``object``s and ``def``s, rather than SBT's
+https://eed3si9n.com/4th-dimension-with-sbt-013/[four-dimensional task matrix]. Your IDE's
+"*jump-to-definition*" in Mill actually brings you to the implementation of a task, rather
+than an SBT `taskKey` declaration. Customizing things is as simple as writing or overriding
+`def`s. The net effect is that despite both tools' build files being written in Scala,
+Mill's build files are much easier to understand and maintain.
+
+This page compares using Mill to SBT, using the https://github.com/gatling/gatling[Gatling Load Testing Framework]
+codebase as the example. Gatling is a medium sized codebase, 40,000 lines of Scala split over 21
+subprojects. By porting it to Mill, this case study should give you an idea of how Mill compares
+to SBT in more realistic, real-world projects.
+
+In general, in the ideal case Mill and SBT have similar performance: caching, parallelism, incremental
+compilation, and so on. Mill's main advantage over SBT is its simplicity:
+
+* You do not need to keep a live SBT session to maximize performance, exit SBT to run Bash commands,
+  or juggle multiple terminal windows to run SBT in one and Bash in another. Instead, you can just
+  run Mill like any command line tool, and Mill caches and parallelizes to maximize performance automatically
+
+* Mill's IDE support is better than SBT's due to how Mill is designed: peek-at-documentation,
+  jump-to-definition, find-overrides, etc. is much more useful since your IDE understands Mill
+  much better than it understands SBT.
+
+* Mill comes with a lot of useful debugging tools builtin, without needing to juggle external plugins:
+  visualizing subproject dependencies, performance profiles, third-party dependency trees.
This helps
+  you understand what your Mill build is doing.
+
+To do this comparison, we have written a Mill `build.mill` file for the Gatling project. This can be used
+with Mill to build and test the various submodules of the Gatling project without needing to
+change any other files in the repository:
+
+- https://github.com/com-lihaoyi/mill/blob/main/example/thirdparty/gatling/build.mill[Gatling build.mill file]
+
+== Completeness
+
+The Mill build for Gatling is not 100% complete, but it covers most of the major parts of Gatling:
+compiling Scala, running tests. It does not currently cover linting via
+https://github.com/diffplug/spotless[Spotless], as that is not built-in to Mill, but it could be
+added as necessary.
+
+The goal of this exercise is not to be 100% feature complete enough to replace the SBT build
+today. It is instead meant to provide a realistic comparison of how using Mill in a realistic,
+real-world project compares to using SBT.
+
+== Performance
+
+[cols="1,1,1,1"]
+|===
+| Benchmark | Cold SBT | Hot SBT (rounded) | Mill
+
+| <> | 34.28s | ≈ 14s | 10.4s
+| <> | 10.1s | ≈ 1s | 0.96s
+| <> | 6.2s | ≈ 0s | 0.48s
+| <> | 4.2s | ≈ 0s | 0.40s
+|===
+
+SBT can be used in two modes, either "cold" run directly from the command line, or "hot"
+where an SBT session is kept open and commands are run within it. I provide the timings for
+both scenarios above, along with the time taken for Mill commands. Mill does not have this
+distinction, and can only be run directly from the command line. The `Hot SBT` mode only
+reports timings to the nearest second, so that is the number used in this comparison.
+
+The Mill build benchmarks for Gatling are generally much snappier than the `Cold SBT` benchmark,
+and comparable to the `Hot SBT` benchmark.
Mill is marginally faster in the
+`Parallel Clean Compile All` benchmark (10s vs 14s), but more importantly does not have the same
+_Cold vs Hot_ distinction that SBT has, as Mill is always run "cold" from the command line and
+keeps the process around to provide "hot" performance automatically.
+
+For the benchmarks above, each provided number is the median wall time of three consecutive runs
+on my M1 Macbook Pro. While ad-hoc, these benchmarks are enough to give you a flavor of how
+Mill's performance compares to SBT. For a fair comparison, we disabled `gatling-build-plugin`
+in the sbt setup, which bundles the various scalafmt/scalafix/etc. linters as part of `compile`,
+since Mill doesn't bundle them and instead expects them to be invoked separately.
+
+=== Parallel Clean Compile All
+
+```bash
+$ sbt clean; time sbt test:compile
+34.28s
+32.84s
+34.55s
+
+$ sbt
+
+sbt> clean; test:compile
+15s
+13s
+14s
+
+$ ./mill clean; time ./mill -j 10 __.compile
+10.7s
+9.4s
+10.4s
+```
+
+This benchmark measures the time taken to parallel compile all the Java and Scala code in
+the Gatling code base. We configure Mill to do the same using the same number of threads
+(10 on my laptop) as SBT uses. As SBT runs in parallel by default, we do not have a
+comparison for sequential execution times.
+
+=== Clean Compile Single-Module
+
+```bash
+> sbt clean; time sbt gatling-commons/compile
+10.1
+10.7
+10.1
+
+sbt> clean; gatling-commons/compile
+1s
+1s
+1s
+
+$ ./mill clean; time ./mill gatling-commons.compile
+0.96s
+0.95s
+0.96s
+```
+
+This benchmark illustrates the use case of clean-compiling a single module. In this case,
+the `gatling-commons` module's application code in `commons/`, _excluding_ the test code
+and all the downstream submodules.
+ +=== Incremental Compile Single-Module + +```bash +$ echo "" >> gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala +$ time sbt gatling-commons/compile +6.6s +6.2s +6.0s + +sbt> gatling-commons/compile +0s +0s +0s + +$ echo "" >> gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala +$ time ./mill gatling-commons.compile +0.49s +0.48s +0.47s +``` + +This benchmark measures the common case of making a tiny change to a single file and +re-compiling just that module. This is the common workflow that most software developers +do over and over day-in and day-out. We simulate this by appending a new line to the +file `gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala`. + +Both Mill and SBT are able to take advantage of the small code change and re-compile +only the single files needing re-compilation, demonstrating substantial speedups over +the <> benchmark above. Both "Hot SBT" and "Mill" finish in +a fraction of a second, while "Cold SBT" has substantial overhead. + +=== No-Op Compile Single-Module + +```bash +$ time sbt gatling-commons/compile +4.2s +4.2s +4.2s + +sbt> gatling-commons/compile +0s +0s +0s + +$ time ./mill gatling-commons.compile +0.39s +0.41s +0.40s +``` + +This benchmark is meant to measure the pure overhead of running the build tool: given a single +module that did _not_ change, the build tool should need to do _nothing_ in response, and so +any time taken is pure overhead. + +Again, we see both "Hot SBT" and "Mill" finish in a fraction of a second, with the Mill numbers +showing a ~0.4s overhead to run Mill even when there is no work to do, and the "Cold SBT" has +in comparison substantial >4s overhead. + +== IDE Support + +One area that Mill does significantly better than SBT is in the IDE support. For example, although +IDEs like IntelliJ are nominally able to parse and analyze your SBT files, the assistance they can +provide is often not very useful. 
For example, consider the inspection and jump-to-definition experience
+of looking into an SBT Task:
+
+image::comparisons/IntellijGatlingSbtTask1.png[]
+image::comparisons/IntellijGatlingSbtTask2.png[]
+
+Or an SBT plugin:
+
+image::comparisons/IntellijGatlingSbtPlugin1.png[]
+image::comparisons/IntellijGatlingSbtPlugin2.png[]
+
+In general, although your IDE can make sure the name of the task exists, and the type is correct, it
+is unable to pull up any further information about the task: its documentation, its implementation,
+usages, any upstream overridden implementations, etc. Some of this is due to the limitations of the IDE,
+but some of it is fundamental: because SBT makes the developer define the `val myTask` separate
+from the assignment of `myTask := something`, jumping to the definition of `myTask` tells you nothing
+at all: what it does, where it is assigned, etc.
+
+In comparison, for Mill, IDEs like Intellij are able to provide much more intelligence. e.g. when
+inspecting a task, it is able to pull up the documentation comment:
+
+image::comparisons/IntellijGatlingMillTask1.png[]
+
+It is able to pull up any overridden implementations of a task, directly in the editor:
+
+image::comparisons/IntellijGatlingMillTask2.png[]
+
+And you can easily navigate to the overridden implementations to see where they are defined and
+what you are overriding:
+
+image::comparisons/IntellijGatlingMillTask3.png[]
+
+Mill's equivalent of SBT plugins are just Scala traits, and again you can easily pull up their
+documentation in-line in the editor or jump to their full implementation:
+
+image::comparisons/IntellijGatlingMillPlugin1.png[]
+image::comparisons/IntellijGatlingMillPlugin2.png[]
+
+In general, navigating around your build in Mill is much more straightforward than
+navigating around your build in SBT. All your normal IDE functionality works perfectly:
+jump-to-definition, find-usages, peek-at-documentation, and so on.
Although the Mill +and SBT builds end up doing the same basic things - compiling Scala, running tests, +zipping up jars - Mill helps de-mystify things considerably so you are never blocked +wondering what your build tool is doing. + +== Debugging Tooling + +Another area that Mill does better than SBT is providing builtin tools for you to understand +what your build is doing. For example, the Gatling project build discussed has 21 submodules +and associated test suites, but how do these different modules depend on each other? With +Mill, you can run `./mill visualize __.compile`, and it will show you how the +`compile` task of each module depends on the others: + +image::comparisons/GatlingCompileGraph.svg[] + +Apart from the static dependency graph, another thing of interest may be the performance +profile and timeline: where the time is spent when you actually compile everything. With +Mill, when you run a compilation using `./mill -j 10 __.compile`, you automatically get a +`out/mill-chrome-profile.json` file that you can load into your `chrome://tracing` page and +visualize where your build is spending time and where the performance bottlenecks are: + +image::comparisons/GatlingCompileProfile.png[] + +If you want to inspect the tree of third-party dependencies used by any module, the +built in `ivyDepsTree` command lets you do that easily: + +```bash +$ ./mill gatling-app.ivyDepsTree +[137/137] gatling-app.ivyDepsTree +├─ org.scala-lang:scala-library:2.13.14 +├─ io.gatling:gatling-shared-model_2.13:0.0.6 +│ ├─ io.gatling:gatling-shared-util_2.13:0.0.8 +│ │ ├─ org.scala-lang:scala-library:2.13.14 +│ │ └─ org.scala-lang.modules:scala-collection-compat_2.13:2.11.0 +│ │ └─ org.scala-lang:scala-library:2.13.14 +│ ├─ io.suzaku:boopickle_2.13:1.3.3 +│ │ └─ org.scala-lang:scala-library:2.13.14 +│ └─ org.scala-lang:scala-library:2.13.14 +├─ io.gatling:gatling-shared-cli:0.0.3 +│ └─ com.github.spotbugs:spotbugs-annotations:4.8.4 -> 4.8.6 +│ └─ 
com.google.code.findbugs:jsr305:3.0.2 +├─ org.simpleflatmapper:lightning-csv:8.2.3 +│ └─ org.simpleflatmapper:sfm-util:8.2.3 +├─ com.github.ben-manes.caffeine:caffeine:3.1.8 +│ ├─ com.google.errorprone:error_prone_annotations:2.21.1 +│ └─ org.checkerframework:checker-qual:3.37.0 +... +``` + +None of these tools are rocket science, but Mill provides all of them out of the +box in a convenient package for you to use. Whether you want a visual graph layout, +a parallel performance profile, or a third-party dependency tree of your project, +Mill makes it easy and convenient without needing to fiddle with custom configuration +or third party plugins. This helps make it easy for you to explore, understand, and +take ownership of the build tool. + +== Conclusion + +Both the Mill and SBT builds we discussed in this case study do the same thing: they +compile Java and Scala code and run tests. If set up and used properly, SBT builds +are performant and do what needs to be done. + +Where Mill has an advantage over SBT is in its simplicity and understandability. You +do not need to worry about using it "the wrong way" and ending up with workflows running +slower than necessary. You can explore your build using your IDE like you would any other +project, tracing task dependencies using the same jump-to-definition you use to trace +method calls in your application code. Mill provides builtin tools to help you navigate, +visualize, and understand your build, turning a normally opaque "build config" into +something that's transparent and easily understandable. 
+
diff --git a/docs/modules/ROOT/pages/comparisons/unique.adoc b/docs/modules/ROOT/pages/comparisons/unique.adoc
new file mode 100644
index 00000000000..a760cd9c2c9
--- /dev/null
+++ b/docs/modules/ROOT/pages/comparisons/unique.adoc
@@ -0,0 +1,529 @@
+# What Makes Mill Unique
+
+include::partial$gtag-config.adoc[]
+
+https://mill-build.org/[Mill] is a JVM build tool that targets Java/Scala/Kotlin and has
+potential to serve the large-monorepo codebases that Bazel currently serves. Mill has good
+traction among its users, benchmarks that demonstrate 2-10x faster builds than its competitors,
+and a unique "direct-style" design that makes it easy to use and extend. This page discusses
+some of the most interesting design decisions in Mill, and how it sets Mill apart from
+other build tools on the market.
+
+## What is a Build Tool?
+
+A build tool is a program that coordinates the various tasks necessary to compile,
+package, test, and run a codebase: maybe you need to run a compiler, download some dependencies,
+package an executable or container. While a small codebase can get by with a shell script that
+runs every task every time, one at a time, such a naive approach gets slower
+and slower as a codebase grows and the build tasks necessarily get more numerous and complex.
+
+In order to prevent development from grinding to a halt, you need to begin skipping the
+build tasks you do not need at any point in time, and caching
+and parallelizing those that you do. This often starts
+off as some ad-hoc if-else statements in a shell script, but manually maintaining
+skipping/caching/parallelization logic is tedious and error-prone. At some point it becomes
+worthwhile to use a purpose-built tool to do it for you, and that is when you turn
+to build tools like https://maven.apache.org/[Maven], https://www.gnu.org/software/make/[Make],
+https://mill-build.org/[Mill], or https://bazel.build/[Bazel]. For this article,
+we will mostly discuss Mill.
+
+## What is Mill?
+
+The Mill build tool was started in 2017 as an exploration of the ideas I
+found when learning to use Google's https://bazel.build/[Bazel] build tool.
+At a glance, Mill looks similar to other build tools you may be familiar with, with a
+`build.mill` file in the root of a project defining the dependencies and testing
+setup for a module:
+
+```scala
+package build
+import mill._, javalib._
+
+object foo extends JavaModule {
+  def ivyDeps = Agg(
+    ivy"net.sourceforge.argparse4j:argparse4j:0.9.0",
+    ivy"org.thymeleaf:thymeleaf:3.1.1.RELEASE"
+  )
+
+  object test extends JavaTests with TestModule.Junit4
+}
+```
+
+The syntax may be a bit unfamiliar, but anyone familiar with programming can probably guess
+what this build means: a `JavaModule` with two ivy dependencies `argparse4j` and `thymeleaf`,
+and a `test` submodule supporting `Junit4`.
+This build can then be compiled, tested, run, or packaged into an assembly from the command line:
+
+```bash
+> ./mill foo.compile
+compiling 1 Java source...
+
+> ./mill foo.run --text hello
+

hello

+ +> ./mill foo.test +Test foo.FooTest.testEscaping finished, ... +Test foo.FooTest.testSimple finished, ... +0 failed, 0 ignored, 2 total, ... + +> ./mill show foo.assembly +".../out/foo/assembly.dest/out.jar" + +> ./out/foo/assembly.dest/out.jar --text hello +

hello

+``` + +Mill was originally a Scala build tool competing with https://scala-sbt.org/[SBT], and by 2023 it +had reached around 5-10% market share in the Scala community +(https://www.jetbrains.com/lp/devecosystem-2023/scala/[Jetbrains Survey], +https://scalasurvey2023.virtuslab.com/[VirtusLabs Survey]). +It recently grew first-class Java support, demonstrating +xref:comparisons/why-mill.adoc[2-10x speedups] over existing Java build tools +like Maven or Gradle. Mill also has gained experimental support for Java-adjacent platforms +like xref:kotlinlib/intro.adoc[Kotlin] and +xref:android/java.adoc[Android], and has demonstrated the ability to branch out into supporting +more distant toolchains like xref:extending/example-typescript-support.adoc[Typescript] +and xref:extending/example-python-support.adoc[Python]. + +Mill also works well with xref:large/large.adoc[large builds]: its build logic can be +xref:large/multi-file-builds.adoc[split into multiple folders], is incrementally compiled, +lazily initialized, and automatically cached and parallelized. That means that even large +codebases can remain fast and responsive: Mill's own build easily manages over 400 modules, +and the tool can likely handle thousands of modules without issue. + + +## The React.js of Build Tools + +We've briefly covered what Mill is above, but one question remains: why Mill? +Why not one of the other 100 build tools out there? + +Mill is unique in that it shares many of its core design decisions with https://react.dev/[React.js], +the popular Javascript UI framework. I was among the first external users of React when I +introduced it to Dropbox in 2014, and while people gripe about it today, React was +really a revolution in how Javascript UIs were implemented. UI flows that used to take +weeks suddenly took days, requiring a fraction of the code and complexity that they +previously took to implement + +React's two most important innovations are: + +1. 
Letting users write "direct style" code to define their UI - Javascript functions that + directly returned the HTML structure you wanted - rather than a "code behind" + approach of registering callbacks to mutate the UI in response to events + +2. Using a single "general purpose" programming language for your UI, rather than splitting + your logic into multiple special-purpose domain-specific languages + +While React does a huge number of clever things - +https://legacy.reactjs.org/docs/faq-internals.html[virtual dom diffing], +https://react.dev/learn/writing-markup-with-jsx[JSX], +https://react.dev/reference/react-dom/client/hydrateRoot[de/re-hydration], +etc. - all of those are only in service of the two fundamental ideas. e.g. At Dropbox we +used React for years without JSX, and many of the later frameworks inspired by React +provide a similar experience but use other techniques to replace virtual dom diffing. +Furthermore, React isn't limited to the HTML UIs, with the same techniques being +used to manage https://reactnative.dev/[mobile app UIs], +https://github.com/vadimdemedes/ink[terminal UIs], and many other scenarios + +Build tools and interactive UIs are on one hand different, but on the other hand +very similar: you are trying to update a large stateful system (whether a HTML page +or filesystem build artifacts) to your desired state in response to change in inputs +(whether user-clicks or source-file-edits). Like with React in 2014, these two ideas are +not widespread among build tools today in 2024. But many of the same downstream benefits apply, +and these ideas give Mill some unique properties as a build tool. + +### Direct-Style Builds + +One key aspect of React.js is that you wrote your code to generate your web UI "directly": + +* Before React, you would write Javascript code whose purpose was to mutate some HTML properties + to set up a forest of callbacks and event handlers. 
These would then be executed when a user
+  interacted with your website, causing further mutations to the HTML UI. This would often
+  recursively trigger other callbacks with further mutations, and you as the developer would
+  somehow need to ensure this all converges to the UI state that you desire.
+
+* In React, you had normal functions containing normal code that executed top-to-bottom,
+  each returning a JSX HTML snippet - really just a Javascript object - with the top-level
+  component eventually returning a snippet representing the entire UI. React would handle
+  all the update logic for you in an efficient manner, incrementally caching and optimizing
+  things automatically. The developer just naively returns the UI structure they want from
+  their React code and React.js does all the rest
+
+Before React you always had a tradeoff: do you re-render the whole UI every update (which
+is easy to implement naively, but wasteful and disruptive to users) or do you do fine-grained UI
+updates (which was difficult to implement, but efficient and user-friendly). React eliminated that
+tradeoff, letting the developer write "naive" code as if they were re-rendering the entire
+UI, while automatically optimizing it to be performant and provide a first-class user experience.
+
+Mill's approach as a build tool is similar:
+
+* Most existing build tools involve registering "task" callbacks to tell the build tool what
+  to do when certain actions happen or certain files change. These callbacks mutate the filesystem
+  in an ad-hoc manner, often recursively triggering further callbacks. It is up to the developer
+  to make sure that these callbacks and filesystem updates end up converging such that
+  your build outputs end up containing the files you want.
+
+* With Mill, you instead write "direct-style" code: normal functions that call other
+  functions and end up returning the final metadata or files that were generated. 
+ Mill handles the work of computing these functions efficiently: automatically caching, + parallelizing, and optimizing your build. The developer writes naive code computing and + returning the files they want, and Mill does all the rest to make it efficient and performant + +Earlier we saw a hello-world Mill build using the built in module types like `JavaModule`, +but if we remove these built in classes we can see how Mill works under the hood. Consider +the following Mill tasks that define some source files, use the `javac` executable to compile +them into classfiles, and then the `jar` executable to package them together into an assembly: + +```scala +def mainClass: T[Option[String]] = Some("foo.Foo") + +def sources = Task.Source(millSourcePath / "src") +def resources = Task.Source(millSourcePath / "resources") + +def compile = Task { + val allSources = os.walk(sources().path) + os.proc("javac", allSources, "-d", Task.dest).call() + PathRef(Task.dest) +} + +def assembly = Task { + for(p <- Seq(compile(), resources())) os.copy(p.path, Task.dest, mergeFolders = true) + + val mainFlags = mainClass().toSeq.flatMap(Seq("-e", _)) + os.proc("jar", "-c", mainFlags, "-f", Task.dest / "assembly.jar", ".") + .call(cwd = Task.dest) + + PathRef(Task.dest / "assembly.jar") +} +``` + +This code defines the following task graph, with the boxes being the tasks +and the arrows representing the _data-flow_ between them: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + sources -> compile -> assembly + resources -> assembly + mainClass -> assembly +} +``` + +This example does not use any of Mill's builtin support for building Java or +Scala projects, and instead builds a pipeline "from scratch" using Mill +tasks and `javac`/`jar` subprocesses. We define `Task.Source` folders and +plain ``Task``s that depend on them, implementing entirely in our own code. + +Two things are worth noting about this code: + +1. 
It looks almost identical to the equivalent "naive" code you would write without using
+   a build tool! If you remove the `Task{...}` wrappers, you could run the code and it would
+   behave as a naive script running top-to-bottom every time and generating your
+   `assembly.jar` from scratch. But Mill allows you to take such naive code and turn it
+   into a build pipeline with parallelism, caching, invalidation, and so on.
+
+2. You do not see any logic related to parallelism, caching, or invalidation in the code
+   at all! No `mtime` checks, no computing cache keys, no locks, no serializing and
+   de-serializing of data on disk. Mill handles all this for you automatically, so you just
+   need to write your "naive" code and Mill will provide all the "build tool stuff" for free.
+
+
+This direct-style code has some surprising benefits: IDEs often do not understand how registered
+callbacks recursively trigger one another, but they _do_ understand function calls, and so
+they should be able to seamlessly navigate up and down your build graph just by following
+those functions. Below, we can see IntelliJ resolve `compile` to the exact `def compile`
+definition in `build.foo`, allowing us to jump to it if we want to see what it does:
+
+image::unique/IntellijDefinition.png[]
+
+In the `JavaModule` example earlier, IntelliJ is able to see the `def ivyDeps` configuration
+override, and find the exact override definitions in the parent class hierarchy:
+
+image::unique/IntellijOverride.png[]
+
+This "direct style" doesn't just make navigating your build easy for IDEs: human programmers
+are _also_ used to navigating in and out of function calls, up and down class hierarchies,
+and so on. Thus for a developer configuring or maintaining their build system, Mill's direct
+style means they have an easier time understanding what is going on, especially compared to the
+classic "callbacks forests" you may have come to expect from build tools. 
However, +both of these benefits require that the IDE and the human understands the code in the +first place, which leads to the second major design decision: + +### Using a Single General Purpose Language + +React.js makes users use Javascript to implement their HTML UIs. While a common approach +now in 2024, it is hard to overstate how controversial and unusual this design decision +was at the time. + +In 2014, web UIs were implemented in some HTML _templating language_ with separate CSS +source files, and "code behind" Javascript logic hooked in. This allowed separation of +concerns: a graphic designer could edit the HTML and CSS without needing to know +Javascript, and a programmer could edit the Javascript without needing to be an expert +in HTML/CSS. And so writing frontend code in three languages in three separate files +was the best practice, and so it was since the inception of the web two decades prior. + +React.js flipped all that on its head: everything was Javascript! UI components were Javascript +objects first, containing Javascript functions that returned HTML snippets (which +were really _also_ Javascript objects). CSS was often in-lined at the use site, perhaps +with constants fetched from a https://cssinjs.org/[CSS-in-JS] library. This was a total +departure from the previous two decades of web development best practices. + +While controversial, +this approach had two huge advantages: + +1. It broke the hard language barriers between HTML/CSS/JS, allowing more flexible + ways of organizing and grouping code in order to meet the + needs of the particular UI. While seemingly trivial, it makes a huge difference + to have one file in one language containing everything you need to know about a + UI component, rather than needing to tab between three files in three different languages. + +2. It removed the separate second-class "templating language". 
While the "platonic ideal" + was people writing HTML/CSS/JS, the HTML often ended up being https://jinja.palletsprojects.com/[Jinja2], + https://haml.info/[HAML], or https://mustache.github.io/[Mustache] templates instead, + and the CSS usually ended up being replaced by https://sass-lang.com/[SASS] or + https://lesscss.org/[LESS]. While Javascript was by no means perfect, having + everything in a single "real" programming language was a breath of fresh air + over tabbing between three different languages each with their own half-baked version + of language features like if-else, loops, functions, etc. + +The story for build tools is similar: the traditional wisdom has been +to implement your build logic in some limited "build language", in the past often +XML (e.g. for https://maven.apache.org/[Maven], https://github.com/dotnet/msbuild[MSBuild]), +nowadays often JSON/TOML/YAML (e.g. https://github.com/rust-lang/cargo[Cargo]), with +logic split out into separate shell scripts or plugins. While this worked, it always +had issues: + +1. Like web development, build tools _also_ had the logic split between multiple + languages. Templated-Bash-in-Yaml is a common outcome, Bazel makes you write + https://bazel.build/reference/be/make-variables[make-interpolated Bash in pseudo-Python], + Maven makes you choose between XML+Java to write plugins or + Bash-in-XML https://maven.apache.org/plugins/maven-antrun-plugin/[Ant scripts]. + Most build tools using "simple" config languages would inevitably find logic pushed + into shell scripts within the build, or the entire build tool itself wrapped in a shell + script to provide the flexibility a project needs + +2. These "simple build languages" would always start off simple, but eventually grow + real programming language features: not just if-else, loops, functions, inheritance, but + also package managers, package repositories, profilers, debuggers, and + more. 
These were always ad-hoc, designed and implemented in their own weird and
+   idiosyncratic ways, and generally inferior to the same feature or tool provided by
+   a real programming language.
+
+_"Config metadata turns into templating language turns into general-purpose language"_
+is a tale as old as time. Whether it's HTML templating using https://jinja.palletsprojects.com/en/stable/templates/[Jinja2],
+CI configuration using https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/evaluate-expressions-in-workflows-and-actions[Github Actions Config Expressions],
+or infrastructure-as-code systems like https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference.html[Cloudformation Functions]
+or https://helm.sh/docs/chart_best_practices/templates/[Helm Charts]. While the allure
+of using a "simple" config language is strong, many systems inevitably end up growing
+so many programming-language features that you would have been better off using a
+general-purpose language to start off with.
+
+
+
+Mill follows React.js with its "One General-Purpose Language" approach:
+
+1. Mill tasks are just method definitions
+2. Mill task dependencies are just method calls
+3. Mill modules are just objects
+
+While this is not strictly true - Mill tasks and Mill modules have a small amount of extra
+logic necessary to handle caching, parallelization, and other build tool necessities - it is
+true enough that these details are often completely transparent to the user.
+
+This has the same benefits that React.js had from using a general-purpose language throughout:
+
+1. You can directly write code to wire up and perform your build logic all in one language,
+   without the nested Bash-nested-in-Mustache-templates-nested-in-YAML monstrosities common when
+   insufficiently flexible config languages are chosen.
+
+2. 
You _already know_ how programming languages work: not just conditionals, loops, and functions,
+   but also classes, inheritance, overrides, typechecking, IDE navigation, package repositories
+   and library ecosystem (in Mill's case, you can use everything on Java's Maven Central repository).
+   Rather than dealing with half-baked versions of these features that specialized languages
+   inevitably grow, Mill lets you use the real thing right off the bat.
+
+For example, in Mill you may not be familiar with the bundled libraries and APIs, but your
+IDE can help you understand them:
+
+image::unique/IntellijDocs.png[]
+
+And if you make an error, e.g. you typo-ed `resources` as `reources`, your IDE will
+immediately flag it for you even before you run the build:
+
+image::unique/IntellijError.png[]
+
+While all IDEs have good support for understanding JSON/TOML/YAML/XML, the support for
+understanding _a particular tool's dialect of templated-bash-in-yaml_ is much more spotty.
+Even IntelliJ, the gold standard, usually cannot provide more than basic assistance
+editing templated-bash-in-yaml config files. In contrast, IDE support
+for a widely-used general purpose programming language is much more solid.
+
+As another example, if you need a production-quality templating engine to use in your build
+system, you have a buffet of options. The common Java
+xref:extending/import-ivy-plugins.adoc#_importing_java_libraries[Thymeleaf] templating engine is
+available with a single import, as is the popular
+xref:extending/import-ivy-plugins.adoc#_importing_scala_libraries[Scalatags] templating engine.
+Rather than being limited to what the build tool has built-in or what third-party plugins
+someone on the internet has published, you have at your fingertips any library in the huge JVM
+ecosystem, and can use them in exactly the same way you would in any Java/Scala/Kotlin application.
+
+### What About Other Build Tools? 
+
+There are existing build tools that use some of the ideas above, but perhaps none of them
+have both, which is necessary to take full advantage:
+
+* Tools like https://gradle.org/[Gradle], https://ruby.github.io/rake/[Rake], or https://gulpjs.com/[Gulp] may be written
+  in a single language, but are not direct-style: they still rely on you registering a forest
+  of callbacks performing filesystem mutations, and manually ensuring that they are wired up to
+  converge to the state you want. This means that although a human programmer or an IDE
+  like IntelliJ may be able to navigate around the Groovy/Kotlin/Ruby code used to configure the
+  build, both human and machine often have trouble tracing through the forest of mutating callbacks
+  to figure out what is actually happening
+
+* Tools like https://github.com/rust-lang/cargo[Cargo], https://maven.apache.org/[Maven], or `go build`
+  are very inflexible. This leads either to embedded shell scripts (or embedded-shell-scripts-as-XML
+  such as the https://maven.apache.org/plugins/maven-antrun-plugin/[Maven AntRun Plugin]!), or
+  having the build tool `mvn`/`cargo`/`go` itself being wrapped in shell scripts (or even another
+  build tool like Bazel!)
+
+Mill's direct style code and use of a general-purpose language makes it unique among
+build tools, just like how React.js was unique among UI frameworks when it was first released
+in 2014. With these two key design features, Mill makes understanding and maintaining your build
+an order of magnitude easier than traditional tools, democratizing project builds so anyone
+can contribute without needing to be experts.
+
+## Where can Mill Go?
+
+Above, we discussed some of the unique design decisions of Mill, and the value they
+provide to users. In this section we will discuss where Mill can fit into the larger
+build-tool ecosystem.
+I think Mill has legs to potentially grow 10x to 100x bigger than it is today. 
There are +three main areas where I think Mill can grow into: + +### A Modern Java/JVM Build Tool + +Mill is a JVM build tool, and the JVM platform hosts many rich communities and ecosystems: +the Java folks, offshoots like Android, other languages like Kotlin and Scala. All these +ecosystems rely on tools like Maven or Gradle to build their code, and I believe Mill +can provide a better alternative. Even today, there are already many advantages of +using Mill over the incumbent build tools: + +1. Mill today runs the equivalent local workflows xref:comparisons/maven.adoc[4-10x faster than Maven] + and xref:comparisons/gradle.adoc[2-4x faster than Gradle], with automatic parallelization and caching for + every part of your build + +2. Mill today provides better ease of use than Maven or Gradle, with IDE support for + navigating your build graph and visualizing what your build is doing + +3. Mill today makes extending your build 10x easier than Maven or Gradle, directly + using the same JVM libraries you already know without being beholden to third-party plugins + +The JVM is a flexible platform, and although Java/Kotlin/Scala/Android +are superficially different, underneath there is a ton of similarity. Concepts like +classfiles, jars, assemblies, classpaths, dependency management and publishing +artifacts, IDEs, debuggers, profilers, many third-party libraries, are all shared and identical +between the various JVM languages. Mill provides a first class Java and Scala experience, +with growing support for Kotlin and Android. Mill's easy extensibility +means integrating new tools into Mill takes hours rather than days or weeks. 
+ +In the last 15-20 years, we have learned a lot about build tooling, and the field +has developed significantly: + +* https://bazel.build/[Bazel], https://buck.build/[Buck], https://www.pantsbuild.org/[Pants] + have emerged to manage large codebases +* https://webpack.js.org/[Webpack], https://www.snowpack.dev/[Snowpack], https://esbuild.github.io/[ESBuild], + https://nx.dev/[Nx], https://turbo.build/[TurboRepo], https://vite.dev/[Vite] have emerged for Javascript +* https://astral.sh/[Astral], https://python-poetry.org/[Poetry], and others have emerged for Python +* We have seen papers published like https://www.microsoft.com/en-us/research/uploads/prod/2018/03/build-systems.pdf[Build Systems A La Carte], + that thoroughly explore the design space for how a build tool might work. + +But there are no build tools in the Java/JVM ecosystem that really take advantage of these +newer designs and techniques: ideas like having a build graph, automatic caching, automatic +parallelization, side-effect-free build tasks, and so on. While Maven (from 2004) and Gradle +(2008) have been slowly trying to move in these directions, they are also constrained by +their two decades of legacy that limits how fast they can evolve. + +Mill could be the modern Java/JVM build tool: providing 10x speedups over Maven or Gradle, +10x better ease of use, 10x better extensibility. Today Mill already provides a compelling +Java build experience. With some focused effort, I think Mill can be not just a _good_ +option, but the _better_ option for Java projects going forward! + +### An Easier Monorepo Build Tool + +Many companies are using Bazel today. Of the companies I interviewed from my Silicon Valley +network, 25 out of 30 are using or trying to use Bazel. +Bazel is an incredibly powerful tool: it provides https://bazel.build/docs/sandboxing[sandboxing], +parallelization, https://bazel.build/remote/caching[remote caching], +https://bazel.build/remote/rbe[remote execution]. 
These are all things that are
+useful or even necessary as your organization and codebase grows. I even wrote about the
+benefits on my company blog at the time:
+
+* https://www.databricks.com/blog/2019/02/27/speedy-scala-builds-with-bazel-at-databricks.html[Speedy Scala Builds with Bazel at Databricks]
+* https://www.databricks.com/blog/2019/07/23/fast-parallel-testing-at-databricks-with-bazel.html[Fast Parallel Testing with Bazel at Databricks]
+
+There is no doubt that if set up correctly, Bazel is a great experience that "just
+works", and with a single command you can do anything that you could want to do in a codebase.
+
+But of those 25 companies I interviewed, basically everyone was having a hard time adopting Bazel.
+From my own experience, both of my prior employers (Dropbox and Databricks) took
+`O(1 person decade)` of work to adopt Bazel. I have met _multiple_ Silicon Valley dev-tools teams that
+spent months doing a Bazel proof-of-concept only to give up due to the difficulty. Bazel is
+a ferociously complex tool, and although some of that complexity is inherent, much of it is
+incidental, and some of it is to support projects at a scale beyond what most teams would encounter.
+
+I think there is room for a lightweight monorepo build tool that provides maybe 50% of Bazel's
+functionality, but at 10% the complexity:
+
+* Most companies are not Google, don't operate at Google-scale, do not have Google-level
+  problems, and may not need all the most advanced features that Bazel provides
+
+* Bazel itself is not getting any simpler over time - instead it is getting more complex with
+  additional features and functionality, as tends to happen to projects over time
+
+Mill provides many of the same things Bazel does: automatic xref:depth/execution-model.adoc[caching],
+parallelization, xref:depth/sandboxing.adoc[sandboxing],
+xref:extending/import-ivy-plugins.adoc[extensibility]. 
Mill +can already work with a wide variety of programming languages, +from JVM languages like xref:javalib/intro.adoc[Java]/xref:scalalib/intro.adoc[Scala]/xref:kotlinlib/intro.adoc[Kotlin] +to xref:extending/example-typescript-support.adoc[Typescript] and +xref:extending/example-python-support.adoc[Python]. Mill's features are not as +highly-scalable as their Bazel equivalents, but they are provided in a lighter-weight, +easier-to-use fashion suitable for organizations with less than 1,000 engineers +who cannot afford the `O(1 person decade)` it takes to adopt Bazel in their organization. + +For most companies, their problems with Bazel aren't its scalability or feature set, +but its complexity. While Mill can never compete with Bazel for the largest-scale deployments +by its most sophisticated users, the bulk of users operate at a somewhat smaller scale and +need something easier than Bazel. Mill could be that easy monorepo build tool for them to use. + + +## Next Steps For Mill Going Forward + +10 years ago React.js democratized front-end Web UIs: what previously took intricate +surgery to properly wire up event handlers and UI mutations in three separate languages +became a straightforward task of naively returning the UI you want to render. Previously +challenging tasks (e.g. "make a loading bar that is kept in sync with the text on screen as +a file is uploaded") became trivial, and now anyone can probably fumble through a basic +interactive website without getting lost in callback hell. + +I think Mill has a chance to do the same thing for build systems. Like Web UIs 10 years ago, +configuring and maintaining a build-system often requires juggling multiple different +templating/config/scripting languages in an intricate dance of callbacks and filesystem +mutations. 
Like React.js, Mill collapses all this complexity, letting you write naive +"direct-style" code in a single language while getting all the benefits of caching and +parallelism, making previously challenging build pipelines implementations trivial. + +Fundamentally, there are holes in the build-tool market that are not well served: +the Java folks deserve something more modern than Maven or Gradle, and the Monorepo folks need +something easier to use than Bazel. I think Mill has a decent shot at occupying each +of these two niches, and even if it is only able to succeed in one that would +still be significant. Perhaps even significant enough to build a business around! + + +Going forward, I expect to pursue both paths: Mill as a better Java build tool, Mill as +an easier Monorepo build tool. Much of the past +quarter Q3 2024 has been spent polishing the experience of using Mill from Java, but +similar efforts will need to be made on the Monorepo front. I will be working on this full time +and also investing a significant amount of cash in order to support +the effort. If anyone out there is interested in being paid to work on the next-generation +of Java build tools or Monorepo build tools, let me know and we can try to make an arrangement! \ No newline at end of file diff --git a/docs/modules/ROOT/pages/comparisons/why-mill.adoc b/docs/modules/ROOT/pages/comparisons/why-mill.adoc new file mode 100644 index 00000000000..2bd8eb7b044 --- /dev/null +++ b/docs/modules/ROOT/pages/comparisons/why-mill.adoc @@ -0,0 +1,478 @@ += Why Use Mill? + +Mill is a fast build tool for Java, Scala, and Kotlin. Although the Java +compiler is very fast and the Java language is easy to learn, JVM build tools are +known to be slow and hard to use. Mill offers a better alternative: 2-10x faster +than Maven or Gradle on clean compiles, better IDE support, and extensibility without +needing plugins. 
This results in time savings due to less time waiting for or struggling
+with your build tool and more time to focus on the actual work you need to do.
+
+At a first glance, Mill looks like any other build tool. You have build files, you configure
+dependencies, you can compile, run, or test your project:
+
+```scala
+// build.mill
+package build
+import mill._, javalib._
+
+object foo extends JavaModule {
+  def ivyDeps = Agg(
+    ivy"net.sourceforge.argparse4j:argparse4j:0.9.0",
+    ivy"org.thymeleaf:thymeleaf:3.1.1.RELEASE"
+  )
+
+  object test extends JavaTests with TestModule.Junit4
+}
+```
+```bash
+> ./mill foo.compile
+compiling 1 Java source...
+
+> ./mill foo.run --text hello
+

hello

+ +> ./mill foo.test +Test foo.FooTest.testEscaping finished, ... +Test foo.FooTest.testSimple finished, ... +0 failed, 0 ignored, 2 total, ... +``` + +Beyond the basics, Mill provides 3 major advantages over other build tools. The comparison pages +for the respective build tool go into more detail (for xref:comparisons/maven.adoc[Maven], +xref:comparisons/gradle.adoc[Gradle], and xref:comparisons/sbt.adoc[SBT]), but at a high level +these advantages are: + +1. *Performance*: Mill offers a 2-10x speedup means less time waiting for your build tool, + meaning less time waiting for your build and more time doing useful work + +2. *Ease of Use*: Mill has better IDE support in IntelliJ and VSCode and richer + visualization tools than other tools, to help understand your build and what it is doing + +3. *Extensibility*: Mill lets you write code or use any published JVM library in your build, + customizing it to your needs without being limited by third-party plugins + +We will discuss each one in turn. + +## Performance + +### Maven + +Overall across our benchmarks, Mill is 4-10x faster than Maven for clean compiles, +both parallel and sequential, and for many modules or for a single module: + +|=== +| Benchmark | Maven | Mill | Speedup +| xref:comparisons/maven.adoc#_sequential_clean_compile_all[Sequential Clean Compile All] | 98.80s | 23.14s | 4.3x +| xref:comparisons/maven.adoc#_parallel_clean_compile_all[Parallel Clean Compile All] | 48.92s | 8.79s | 5.6x +| xref:comparisons/maven.adoc#_clean_compile_single_module[Clean Compile Single Module] | 4.89s | 1.11s | 4.4x +| xref:comparisons/maven.adoc#_incremental_compile_single_module[Incremental Compile Single Module] | 6.82s | 0.54s | 12.6x +| xref:comparisons/maven.adoc#_no_op_compile_single_module[No-Op Compile Single Module] | 5.25s | 0.47s | 11.2x +|=== + +First, let's look at *Parallel Clean Compile All*. 
+This benchmark involves running `clean` to delete all generated files and re-compiling
+everything in parallel. Mill sees a significant ~6x speedup over Maven for this benchmark.
+You can click on the link above to see a more detailed discussion of how this benchmark was
+run.
+
+The second benchmark worth noting is *Incremental Compile Single Module*.
+This benchmark involves making a single edit to a single already-compiled file in `common` -
+adding a single newline to the end of the file - and re-compiling `common` and `common.test`.
+Mill sees a huge ~12x speedup for this benchmark, because Mill's incremental compiler
+(https://github.com/sbt/zinc[Zinc]) is able to detect that only one file in one module
+has changed, and that the change is small enough
+to not require other files to re-compile. In contrast, Maven re-compiles all files in both
+modules, even though only one file was touched and the change was trivial.
+
+
+### Gradle
+
+The comparison with Gradle is less stark, but still significant. Mill is 2-4x faster than Gradle
+across the various workflows:
+
+
+|===
+| Benchmark | Gradle | Mill | Speedup
+| xref:comparisons/gradle.adoc#_sequential_clean_compile_all[Sequential Clean Compile All] | 17.6s | 5.40s | 3.3x
+| xref:comparisons/gradle.adoc#_parallel_clean_compile_all[Parallel Clean Compile All] | 12.3s | 3.57s | 3.4x
+| xref:comparisons/gradle.adoc#_clean_compile_single_module[Clean Compile Single Module] | 4.41s | 1.20s | 3.7x
+| xref:comparisons/gradle.adoc#_incremental_compile_single_module[Incremental Compile Single Module] | 1.37s | 0.51s | 2.7x
+| xref:comparisons/gradle.adoc#_no_op_compile_single_module[No-Op Compile Single Module] | 0.94s | 0.46s | 2.0x
+|===
+
+Mill's various "clean compile" workflows are 3-4x faster than Gradle's, while its incremental
+and no-op compile workflows are 2x faster. 
Both Gradle and Mill appear to do a good job
+limiting the compilation to only the changed file, but Mill has less fixed overhead than
+Gradle does, finishing in about ~0.5s rather than ~1.5 seconds.
+
+In general, these benchmarks don't show Mill doing anything that Maven or Gradle do not:
+these are equivalent builds for the same projects (https://github.com/netty/netty[Netty] and
+https://github.com/mockito/mockito[Mockito] respectively), compiling the same number of files
+using the same Java compiler, in the same module structure and passing the same suite of tests.
+Rather, what we are seeing is Mill simply having less build-tool overhead than Maven or Gradle,
+so the performance of the underlying JVM and Java compiler (which is actually pretty fast!) can
+really shine through.
+
+
+
+## Ease of Use
+
+The second area that Mill does well compared to tools like Maven or Gradle is in its ease
+of use. This is not just in superficial things like the build file or command-line syntax,
+but also in how Mill exposes how your build works and what your build is doing so you can
+understand it and confidently make changes. We will consider three cases: the Mill Chrome
+Profile, Mill Visualize, and Mill's IDE support
+
+### Chrome Profiles
+
+All Mill runs generate some debugging metadata files in `out/mill-*`. One of these
+is `out/mill-chrome-profile.json`, which is a file following the Chrome Profiling format.
+It can be loaded into any Chrome browser's built-in `chrome://tracing` UI, to let you
+interactively explore what Mill was doing during its last run. e.g. when performing a
+clean compile on the Netty codebase, the profile ends up looking like this:
+
+image::comparisons/NettyCompileProfile.png[]
+
+The Chrome profile shows what task each Mill thread was executing throughout the run.
+The Chrome profiling UI is interactive, so you can zoom in and out, or click on individual
+tasks to show the exact duration and other metadata. 
+
+But the real benefit of the Chrome profile isn't the low-level data it provides, but the
+high-level view:
+
+* In the profile above, it is clear that for the first ~700ms, Mill is able
+  to use all 10 cores on my laptop to do useful work.
+* But after that, utilization is
+  much more sparse: `common.compile`, `buffer.compile`, `transport.compile`, `codec.compile`,
+  appear to wait for one another and run sequentially one after another.
+
+
+This waiting is likely due to dependencies between them, and they take long enough that all
+the other tasks depending on them get held up. For example, when `codec.compile` finishes
+above, we can see a number of downstream tasks immediately start running.
+
+This understanding of your build's performance profile is not just an academic exercise, but
+provides actionable information:
+
+* If I wanted faster Netty clean compiles, speeding up `common.compile`, `buffer.compile`,
+  `transport.compile`, or `codec.compile` would make the most impact.
+
+* On the other hand, time spent speeding up the various `codec-*.compile` tasks would help not at all:
+  these tasks are already running at a time where the CPUs are mostly idle.
+
+Most build tools do provide some way of analyzing build performance, but none of them provide
+it as easily as Mill does: any Mill run generates a profile automatically, and any computer
+with Chrome on it is able to load and let you explore that profile. That is a powerful tool to
+help engineers understand what the build is doing: any engineer who felt a build was slow
+can trivially load it into their Chrome browser to analyze and figure out what is going on.
+
+### Mill Visualize
+
+Apart from the Mill Chrome Profile, Mill also provides the `./mill visualize` command, which
+is useful to show the logical dependency graph between tasks. 
For example, we can use
+`./mill visualize __.compile` (double underscore means wildcard) to
+show the dependency graph between the modules of the Netty build below:
+
+image::comparisons/NettyCompileGraph.svg[]
+
+(_Right-click open-image-in-new-tab to see full size_)
+
+In this graph, we can clearly see that `common.compile`, `buffer.compile`, `transport.compile`,
+and `codec.compile` depend on each other in a linear fashion. This explains why they each must
+wait for the prior task to complete before starting, and cannot run in parallel with one another.
+Furthermore, we can again confirm that many of the `codec-*.compile` tasks depend on `codec.compile`,
+which is why in the profile we saw them waiting for the upstream task to complete before starting.
+
+Although these are things we could have _guessed_ from looking at the Chrome Profile above,
+`./mill visualize` gives you a separate angle from which to look at your build. Together these
+tools can help give greater understanding of what your build is doing and why it is doing that:
+something that can be hard to come by with build tools that are often considered confusing and
+inscrutable.
+
+### IDE Support
+
+One area that Mill does better than Gradle is providing a seamless IDE experience. For example,
+consider the snippet below where we are using Gradle to configure the javac compiler options.
+Due to `.gradle` files being untyped Groovy, the autocomplete and code-assist experience working
+with these files is hit-or-miss.
In the example below, we can see that IntelliJ is able to identify +that `compileArgs` exists and has the type `List`: + +image::comparisons/IntellijMockitoGradleCompileOptions.png[] + +But if you try to jump to definition or find out anything else about it you hit a wall: + +image::comparisons/IntellijMockitoGradleCompileOptions2.png[] + +Often working with build configurations feels like hitting dead ends: if you don't have +`options.compilerArgs` memorized in your head, there is literally nothing you can do in your editor to +make progress to figure out what it is or what it is used for. That leaves you googling +for answers, which can be a frustrating experience that distracts you from the task at hand. + +The fundamental problem with tools like Gradle is that the code you write does not +actually perform the build: rather, you are just setting up some data structure that +is used to configure the _real_ build engine that runs later. Thus when you explore +the Gradle build in an IDE, the IDE can only explore the configuration logic (the +`getCompilerArgs` method above) and is unable to explore the actual build logic (how +`getCompilerArgs` _actually gets used in Gradle_) + +In comparison, Mill's `.mill` files are all statically typed, and as a result IntelliJ is easily able to +pull up the documentation for `def javacOptions`, even though it doesn't have any special support +for Mill built into the IDE: + +image::comparisons/IntellijMockitoMillJavacOptionsDocs.png[] + +Apart from static typing, the way Mill builds are structured also helps the IDE: Mill +code _actually performs your build_, rather than configuring some opaque build engine. 
+
+While that sounds academic, one concrete consequence is that IntelliJ is able to take
+your `def javacOptions` override and
+find the original definitions that were overridden, and show you where they are defined:
+
+image::comparisons/IntellijMockitoMillJavacOptionsParents.png[]
+
+You can jump to any of the overridden `def`s quickly and precisely:
+
+image::comparisons/IntellijMockitoMillJavacOptionsDef.png[]
+
+Furthermore, because task dependencies in Mill are just normal method calls, IntelliJ is
+able to _find usages_, showing you where the task is used. Below, we can see the method
+call in the `def compile` task, which uses `javacOptions()` along with a number of other tasks:
+
+image::comparisons/IntellijMockitoMillCompile.png[]
+
+From there, if you are curious about any of the other tasks used alongside `javacOptions`, it's
+easy for you to pull up _their_ documentation, jump to _their_
+definition, or find _their_ usages. For example we can pull up the docs of
+`compileClasspath()` below, jump to _its_ implementation, and continue
+interactively exploring your build logic from there:
+
+image::comparisons/IntellijMockitoMillCompileClasspath.png[]
+
+Unlike most other build tools, Mill builds can be explored interactively in your
+IDE. If you do not know what something does, its documentation, definition, or usages are always
+one click away in IntelliJ or VSCode. This isn't a new experience for Java developers, as it
+is what you would be used to day-to-day in your application code! But Mill brings that same
+polished experience to your build system - traditionally something that has been opaque
+and hard to understand - and does so in a way that no other build tool does.
+
+
+## Extensibility
+
+Mill allows you to directly write code to configure your build, and even download libraries
+from Maven Central.
+
+Most build tools need plugins to do anything: if you want to Foo you need a
+Foo plugin, if you want to Bar you need a Bar plugin, for any possible Foo or Bar. These could
+be simple tasks - zipping up files, pre-rendering web templates, preparing static assets for
+deployment - but even a task that would be trivial to implement in a few lines of code requires
+you to Google for third-party plugins, dig through their Github to see which one is best
+maintained, and hope for the best when you include it in your build. And while you could
+write plugins yourself, doing so is usually non-trivial.
+
+Mill is different. Although it does have plugins for more advanced integrations, for most
+simple things you can directly write code to achieve what you want, using the bundled
+filesystem, subprocess, and dependency-management libraries. And even if you need third-party
+libraries from Maven Central to do Foo, you can directly import the "Foo" library and use it
+directly, without having to find a "Foo build plugin" wrapper.
+
+### Simple Custom Tasks
+
+The following Mill build is a minimal Java module `foo`. It contains no custom configuration, and
+so inherits all the defaults from `mill.javalib.JavaModule`: default source folder layout, default
+assembly configuration, default compiler flags, and so on.
+
+```scala
+package build
+import mill._, javalib._
+
+object foo extends JavaModule {
+}
+```
+```bash
+> mill compile
+Compiling 1 Java source...
+```
+
+If you want to add a custom task, this is as simple as defining a method e.g.
+`def lineCount = Task { ... }`.
The body of `Task` performs the action we want, and
+can depend on other tasks such as `allSourceFiles()` below:
+
+```scala
+package build
+import mill._, javalib._
+
+object foo extends JavaModule {
+  /** Total number of lines in module source files */
+  def lineCount = Task {
+    allSourceFiles().map(f => os.read.lines(f.path).size).sum
+  }
+}
+```
+
+Once we define a new task, we can immediately begin using it in our build.
+`lineCount` is not used by any existing `JavaModule` tasks, but we can still
+show its value via the Mill command line to force it to evaluate:
+
+```bash
+> mill show foo.lineCount
+17
+```
+
+Note that as `lineCount` is a `Task`, we get automatic caching, invalidation, and
+parallelization: these are things that every `Task` gets for free, without the task
+author having to do anything. And although we wrote the `lineCount` logic in the main
+`build.mill` file for this example, if it grows complex enough to get messy it is
+easy to move it to your own xref:extending/writing-plugins.adoc[custom plugins].
+
+### Overriding Tasks
+
+To wire up `lineCount` into our main `JavaModule` `compile`/`test`/`run` tasks,
+one way is to take the line count value and write it to a file in `def resources`.
+This file can then be read at runtime as a JVM resource.
We do that below
+by overriding `def resources` and making it depend on `lineCount`, in addition
+to its existing value `super.resources()`:
+
+```scala
+package build
+import mill._, javalib._
+
+object foo extends JavaModule {
+  /** Total number of lines in module source files */
+  def lineCount = Task {
+    allSourceFiles().map(f => os.read.lines(f.path).size).sum
+  }
+
+  /** Generate resources using lineCount of sources */
+  override def resources = Task {
+    os.write(Task.dest / "line-count.txt", "" + lineCount())
+    super.resources() ++ Seq(PathRef(Task.dest))
+  }
+}
+```
+
+
+Because our `def resources` overrides the existing `resources` method inherited from `JavaModule`,
+the downstream tasks automatically now use the new override instead, as that is how overrides
+work. That means if you call `mill foo.run`, it will automatically pick up the new `resources`
+including the generated `line-count.txt` file and make it available to
+the application code to use e.g. to print it out at runtime:
+
+```bash
+> mill foo.run
+Line Count: 18
+```
+
+Next, we'll look at a more realistic example,
+which includes usage of third-party libraries in the build.
+
+### Using Libraries from Maven Central in Tasks
+
+Earlier on we discussed possibly pre-rendering HTML pages in the build so they can be
+served at runtime. The use cases for this are obvious: if a page never changes, rendering
+it on every request is wasteful, and even rendering it once and then caching it can impact
+your application startup time. Thus, you may want to move some HTML rendering to build-time,
+but with traditional build tools such a move is sufficiently inconvenient and complicated
+that people do not do it.
+
+With Mill, pre-rendering HTML at build time is really easy, even if you need a third-party
+library.
Mill does not ship with a bundled HTML templating engine, but you can use the +`import $ivy` syntax to include one such as Thymeleaf, which would immediately make the +Thymeleaf classes available for you to import and use in your build as below: + + +```scala +package build +import mill._, javalib._ +import $ivy.`org.thymeleaf:thymeleaf:3.1.1.RELEASE` +import org.thymeleaf.TemplateEngine +import org.thymeleaf.context.Context +object foo extends JavaModule { + def htmlSnippet = Task { + val context = new Context() + context.setVariable("heading", "hello") + new TemplateEngine().process( + "
<h1 th:text=\"${heading}\"></h1>
", + context + ) + } + def resources = Task.Sources{ + os.write(Task.dest / "snippet.txt", htmlSnippet()) + super.resources() ++ Seq(PathRef(Task.dest)) + } +} +``` + +Once we have run `import $ivy`, we can import `TemplateEngine`, `Context`, and replace our +`def lineCount` with a `def htmlSnippet` task that uses Thymeleaf to render HTML. Again, +we get full IDE support for working with the Thymeleaf Java API, the new `htmlSnippet` task +is inspectable from the Mill command line via `show`, and we wire it up into +`def resources` so it can be inspected and used at runtime by the application +(in this case just printed out): + + +```bash +> mill show foo.htmlSnippet +"
<h1>hello</h1>
" + +> mill foo.compile +compiling 1 Java source... +... + +> mill foo.run +generated snippet.txt resource:
<h1>hello</h1>
+```
+
+Rendering HTML using the Thymeleaf templating engine is not rocket science, but what is
+interesting here is what we did _not_ need to do:
+
+* We did _not_ need to find a Thymeleaf-Mill plugin in order to include Thymeleaf in our
+  build
+
+* We did _not_ need to learn a special API or framework for authoring build plugins ourselves
+  to write a plugin to include Thymeleaf in our build
+
+* We did _not_ need to add fragile shell scripts to augment our build logic and
+  implement the functionality we need.
+
+
+Instead, we could simply import Thymeleaf directly from Maven Central and use it just
+like we would use it in any Java application, with IDE support, typechecking,
+and automatic parallelism and caching.
+
+'''
+
+
+Most real projects require some kind of ad-hoc build tasks: you may be pre-processing static
+assets for web deployment, embedding build metadata for runtime debugging, or pre-rendering
+HTML pages to optimize performance at runtime. With most build tools, you often needed to pull
+in some poorly-maintained plugin off of Github, write your own using a complicated plugin
+framework, or even wrap your build tool in ad-hoc shell scripts. With most other build tools,
+caching and parallelism are things that the build author needs to use manually, meaning nobody
+gets it right and your build performance is never as good as it could be.
+
+In contrast, Mill makes it easy to write concise type-checked code to perform ad-hoc tasks
+to do whatever you need to do. You get full IDE support, automatic caching and
+parallelism, and access to the huge JVM library ecosystem on Maven Central.
+Rather than grabbing unmaintained plugins off of Github or augmenting your build
+with fragile shell scripts, Mill allows your own custom logic to be implemented
+in a way that is flexible, performant, and safe, such that anyone can configure their
+build correctly and achieve maximum performance even without being a build tool expert.
+ +## Conclusion + +To wrap up, Mill does all the same things that other build tools like Maven or Gradle do, +but aims to do them better: faster, easier to use, and easier to extend. + +Build systems have traditionally been mysterious black boxes that only experts could work +with: slow for unknown reasons, with cargo-culted configuration and usage commands, +and challenging for normal application developers to contribute improvements to. +Mill flips this on its head, democratizing your build system such that even non-experts +are able to contribute, and can do so safely and easily such that your build workflows +achieve their maximum possible performance. + +The rest of this doc-site contains more Mill build tool comparisons +(with xref:comparisons/maven.adoc[Maven], xref:comparisons/gradle.adoc[Gradle], +xref:comparisons/sbt.adoc[SBT]), with getting started instructions +for using Mill with xref:javalib/intro.adoc[Java], with xref:scalalib/intro.adoc[Scala], +or with xref:kotlinlib/intro.adoc[Kotlin], and detailed documentation for how Mill +works. Please try it out and let us know in the +https://github.com/com-lihaoyi/mill/discussions[discussions forum] how it goes! diff --git a/docs/modules/ROOT/pages/depth/design-principles.adoc b/docs/modules/ROOT/pages/depth/design-principles.adoc new file mode 100644 index 00000000000..29e5508bf24 --- /dev/null +++ b/docs/modules/ROOT/pages/depth/design-principles.adoc @@ -0,0 +1,437 @@ += Mill Design Principles +:page-aliases: Mill_Internals.adoc, Mill_Design_Principles.adoc + +include::partial$gtag-config.adoc[] + +The following external resources give more of a flavor of the architecture behind +Mill: + +* https://www.youtube.com/watch?v=UsXgCeU-ovI[Video: A Deep Dive into the Mill Build Tool] +* https://www.lihaoyi.com/post/SoWhatsSoSpecialAboutTheMillScalaBuildTool.html[Blog Post: What's So Special About The Mill Scala Build Tool?] 
+* https://www.youtube.com/watch?v=j6uThGxx-18[Video: Mill a Build Tool based on Pure Functional Programming] +* http://www.lihaoyi.com/post/BuildToolsasPureFunctionalPrograms.html[Blog Post: Build Tools as Pure Functional Programs] +* http://www.lihaoyi.com/post/SowhatswrongwithSBT.html[Blog Post: So, what's wrong with SBT?] + +== Principles + +=== Dependency graph first + +Mill's most important abstraction is the dependency graph of ``Task``s. +Constructed using the `T {...}` `Task.Anon {...}` `Task.Command {...}` syntax, these +track the dependencies between steps of a build, so those steps can be executed +in the correct order, queried, or parallelized. + +While Mill provides helpers like `ScalaModule` and other things you can use to +quickly instantiate a bunch of related tasks (resolve dependencies, find +sources, compile, package into jar, ...) these are secondary. When Mill +executes, the dependency graph is what matters: any other mode of organization +(hierarchies, modules, inheritance, etc.) is only important to create this +dependency graph of ``Task``s. + +=== Builds are hierarchical + +The syntax for running tasks from the command line `mill Foo.bar.baz` is +the same as referencing a task in Scala code, `Foo.bar.baz` + +Everything that you can run from the command line lives in an object hierarchy +in your `build.mill` file. Different parts of the hierarchy can have different +``Task``s available: just add a new `def foo = Task {...}` somewhere and you'll be +able to run it. + +Cross builds, using the `Cross` data structure, are just another kind of node in +the object hierarchy. The only difference is syntax: from the command line you'd +run something via `mill core.cross[a].printIt` while from code you use +`core.cross("a").printIt` due to different restrictions in Scala/Bash syntax. + +=== Caching by default + +Every `Task` in a build, defined by `def foo = Task {...}`, is cached by default. 
+Currently this is done using a `foo.json` file in the `out/` folder. The +`Task` is also provided a `foo.dest/` path on the filesystem dedicated to it, for +it to store output files etc. + +This happens whether you want it to or not. Every `Task` is cached, not just +the "slow" ones like `compile` or `assembly`. + +Caching is keyed on the `.hashCode` of the returned value. For ``Task``s +returning the contents of a file/folder on disk, they return `PathRef` instances +whose hashcode is based on the hash of the disk contents. Serialization of the +returned values is done using uPickle. + +=== Functional Purity + +Mill relies heavily on build tasks being "pure": they only depend on their +input tasks, and their only output is their return value. They do not +scribble all over the filesystem, reading and writing from random places. That +is what allows us to be aggressive about caching and parallelizing the +evaluation of build tasks during a build. + +Many kinds of build steps do require files on disk, and for that Mill provides +the `Task.dest` folder. This is a folder on disk dedicated to each build task, +so that it can read and write things to it without worrying about conflicts +with other tasks that have their own `Task.dest` folders. In effect, this makes +even file output "pure": we can know precisely where a task's output files +live when we need to invalidate them, and it allows multiple tasks all +reading and writing to the filesystem to do so safely even when in parallel. + +=== Short-lived build processes + +The Mill build process is meant to be run over and over, not only as a +long-lived daemon/console. That means we must minimize the startup time of the +process, and that a new process must be able to re-construct the in-memory data +structures where a previous process left off, in order to continue the build. 
+ +Re-construction is done via the hierarchical nature of the build: each `Task` +`foo.bar.baz` has a fixed position in the build hierarchy, and thus a fixed +position on disk `out/foo/bar/baz.json`. When the old process dies and a +new process starts, there will be a new instance of `Task` with the same +implementation code and same position in the build hierarchy: this new `Task` +can then load the `out/foo/bar/baz.json` file and pick up where the +previous process left off. + +Minimizing startup time means aggressive caching, as well as minimizing the +total amount of bytecode used: Mill's current 1-2s startup time is dominated by +JVM classloading. By default Mill uses a long-lived compile server to speed +things up even more, but ensuring that the "from scratch" performance remains +good is a core ongoing requirement. + +=== Static dependency graph and Applicative tasks + +``Task``s are _Applicative_, not _Monadic_. There is `.map`, `.zip`, but no +`.flatMap` operation. That means that we can know the structure of the entire +dependency graph before we start executing ``Task``s. This lets us perform all +sorts of useful operations on the graph before running it: + +* Given a Task the user wants to run, pre-compute and display what tasks + will be evaluated ("dry run"), without running them + +* Automatically parallelize different parts of the dependency graph that do not + depend on each other, perhaps even distributing it to different worker + machines like Bazel/Pants can + +* Visualize the dependency graph easily, e.g. by dumping to a DOT file + +* Query the graph, e.g. "why does this thing depend on that other thing?" 
+
+* Avoid running tasks "halfway": if a Task's upstream Tasks fail, we can
+  skip the Task completely rather than running halfway and then bailing out
+  with an exception
+
+In order to avoid making people use `.map` and `.zip` all over the place when
+defining their ``Task``s, we use the `T {...}`/`Task.Anon {...}`/`Task.Command {...}`
+macros which allow you to use `Task#apply()` within the block to "extract" a
+value.
+
+[source,scala]
+----
+def test() = Task.Command {
+  TestRunner.apply(
+    "mill.UTestFramework",
+    runDepClasspath().map(_.path) :+ compile().path,
+    Seq(compile().path)
+  )
+}
+
+----
+
+This is roughly equivalent to the following:
+
+[source,scala]
+----
+def test() = Task.Command { T.zipMap(runDepClasspath, compile, compile) {
+  (runDepClasspath1, compile2, compile3) =>
+    TestRunner.apply(
+      "mill.UTestFramework",
+      runDepClasspath1.map(_.path) :+ compile2.path,
+      Seq(compile3.path)
+    )
+  }
+}
+
+----
+
+This is similar to SBT's `:=`/`.value` macros, or ``scala-async``'s
+`async`/`await`. Like those, the `T {...}` macro should let users program most of
+their code in a "direct" style and have it "automatically" lifted into a graph
+of ``Task``s.
+
+== How Mill aims for Simple
+
+Why should you expect that the Mill build tool can achieve simple, easy &
+flexible, where other build tools in the past have failed?
+
+Build tools inherently encompass a huge number of different concepts:
+
+* What "Tasks" depends on what?
+* How do I define my own tasks?
+* Where do source files come from?
+* What needs to run in what order to do what I want?
+* What can be parallelized and what can't?
+* How do tasks pass data to each other? What data do they pass?
+* What tasks are cached? Where?
+* How are tasks run from the command line?
+* How do you deal with the repetition inherent in a build? (e.g. compile, run &
+  test tasks for every "module")
+* What is a "Module"? How do they relate to "Tasks"?
+* How do you configure a Module to do something different?
+* How are cross-builds (across different configurations) handled? + +These are a lot of questions to answer, and we haven't even started talking +about the actually compiling/running any code yet! If each such facet of a build +was modelled separately, it's easy to have an explosion of different concepts +that would make a build tool hard to understand. + +Before you continue, take a moment to think: how would you answer to each of +those questions using an existing build tool you are familiar with? Different +tools like http://www.scala-sbt.org/[SBT], +https://fake.build[Fake], https://gradle.org/[Gradle] or +https://gruntjs.com/[Grunt] have very different answers. + +Mill aims to provide the answer to these questions using as few, as familiar +core concepts as possible. The entire Mill build is oriented around a few +concepts: + +* The Object Hierarchy +* The Call Graph +* Instantiating Traits & Classes + +These concepts are already familiar to anyone experienced in Scala (or any other +programming language…), but are enough to answer all of the complicated +build-related questions listed above. + +=== The Object Hierarchy + +```graphviz +digraph G { + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + "root-module" [style=dashed] + foo1 [style=dashed] + foo2 [style=dashed] + "root-module" -> foo1 -> "foo1.bar" [style=dashed] + foo1 -> "foo1.qux" [style=dashed] + "root-module" -> foo2 -> "foo2.bar" [style=dashed] + foo2 -> "foo2.qux" [style=dashed] + foo2 -> "foo2.baz" [style=dashed] +} +``` + +The module hierarchy is the graph of objects, starting from the root of the +`build.mill` file, that extend `mill.Module`. At the leaves of the hierarchy are +the ``Task``s you can run. + +A ``Task``'s position in the module hierarchy tells you many things. 
For +example, a `Task` at position `core.test.compile` would: + +* Cache output metadata at `out/core/test/compile.json` + +* Output files to the folder `out/core/test/compile.dest/` + +* Source files default to a folder in `core/test/`, `core/test/src/` + +* Be runnable from the command-line via `mill core.test.compile` + +* Be referenced programmatically (from other ``Task``s) via `core.test.compile` + +From the position of any `Task` within the object hierarchy, you immediately +know how to run it, find its output files, find any caches, or refer to it from +other ``Task``s. You know up-front where the ``Task``s data "lives" on disk, and +are sure that it will never clash with any other ``Task``s data. + +=== The Call Graph + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + newrank=true; + subgraph cluster_0 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "foo.bar"; + + "foo.bar.sources" -> "foo.bar.compile" -> "foo.bar.classPath" -> "foo.bar.assembly" + "foo.bar.mainClass" -> "foo.bar.assembly" + } + subgraph cluster_1 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "foo"; + + "foo.bar.classPath" -> "foo.compile" [constraint=false]; + "foo.bar.classPath" -> "foo.classPath" + "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" + "foo.mainClass" -> "foo.assembly" + } + subgraph cluster_2 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "qux"; + + "qux.mainClass" -> "qux.assembly" + "foo.classPath" -> "qux.compile" [constraint=false]; + "foo.classPath" -> "qux.classPath" + "qux.sources" -> "qux.compile" -> "qux.classPath" -> "qux.assembly" + } +} +``` + +The Scala call graph of "which task references which other task" is core to +how Mill operates. 
This graph is reified via the `T {...}` macro to make it +available to the Mill execution engine at runtime. The call graph tells you: + +* Which ``Task``s depend on which other ``Task``s + +* For a given `Task` to be built, what other ``Task``s need to be run and in + what order + +* Which ``Task``s can be evaluated in parallel + +* What source files need to be watched when using `--watch` on a given task (by + tracing the call graph up to the ``Source``s) + +* What a given `Task` makes available for other ``Task``s to depend on (via + its return value) + +* Defining your own task that depends on others is as simple as `def foo = + T {...}` + +The call graph within your Scala code is essentially a data-flow graph: by +defining a snippet of code: + +[source,scala] +---- +val b = ... +val c = ... +val d = ... +val a = f(b, c, d) +---- + +you are telling everyone that the value `a` depends on the values of `b` `c` and +`d`, processed by `f`. A build tool needs exactly the same data structure: +knowing what `Task` depends on what other ``Task``s, and what processing it +does on its inputs! + +With Mill, you can take the Scala call graph, wrap everything in the `T {...}` +macro, and get a `Task`-dependency graph that matches exactly the call-graph +you already had: + +[source,scala] +---- +def b = Task { ... } +def c = Task { ... } +def d = Task { ... } +def a = Task { f(b(), c(), d()) } +---- + +Thus, if you are familiar with how data flows through a normal Scala program, +you already know how data flows through a Mill build! The Mill build evaluation +may be incremental, it may cache things, it may read and write from disk, but +the fundamental syntax, and the data-flow that syntax represents, is unchanged +from your normal Scala code. 
+ +=== Instantiating Traits & Classes + +Classes and traits are a common way of re-using common data structures in Scala: +if you have a bunch of fields which are related and you want to make multiple +copies of those fields, you put them in a class/trait and instantiate it over +and over. + +In Mill, inheriting from traits is the primary way for re-using common parts of +a build: + +* Scala "project"s with multiple related ``Task``s within them, are just a + `Trait` you instantiate + +* Replacing the default ``Target``s within a project, making them do new + things or depend on new ``Target``s, is simply `override`-ing them during + inheritance + +* Modifying the default ``Target``s within a project, making use of the old value + to compute the new value, is simply `override`ing them and using `super.foo()` + +* Required configuration parameters within a `project` are `abstract` members + +* Cross-builds are modelled as instantiating a (possibly anonymous) class + multiple times, each instance with its own distinct set of ``Target``s + +In normal Scala, you bundle up common fields & functionality into a `class` you +can instantiate over and over, and you can override the things you want to +customize. Similarly, in Mill, you bundle up common parts of a build into +``trait``s you can instantiate over and over, and you can override the things you +want to customize. "Subprojects", "cross-builds", and many other concepts are +reduced to simply instantiating a `trait` over and over, with tweaks. + +== Prior Work + +=== SBT + +Mill is built as a substitute for SBT, whose problems are +http://www.lihaoyi.com/post/SowhatswrongwithSBT.html[described here]. +Nevertheless, Mill takes on some parts of SBT (builds written in Scala, Task +graph with an Applicative "idiom bracket" macro) where it makes sense. + +=== Bazel + +Mill is largely inspired by https://bazel.build/[Bazel]. 
In particular, the +single-build-hierarchy, where every Target has an on-disk-cache/output-folder +according to their position in the hierarchy, comes from Bazel. + +Bazel is a bit odd in its own right. The underlying data model is good +(hierarchy + cached dependency graph) but getting there is hell. It (like SBT) is +also a 3-layer interpretation model, but layers 1 & 2 are almost exactly the +same: mutable python which performs global side effects (layer 3 is the same +dependency-graph evaluator as SBT/mill). + +You end up having to deal with a non-trivial python codebase where everything +happens via: + +[source,python] +---- +do_something(name="blah") +---- + +or + +[source,python] +---- +do_other_thing(dependencies=["blah"]) + +---- + +where `"blah"` is a global identifier that is often constructed programmatically +via string concatenation and passed around. This is quite challenging. + +Having the two layers be “just python” is great since people know python, but I +think it's unnecessary to have two layers ("evaluating macros" and "evaluating rule +impls") that are almost exactly the same, and I think making them interact via +return values rather than via a global namespace of programmatically-constructed +strings would make it easier to follow. + +With Mill, I’m trying to collapse Bazel’s Python layer 1 & 2 into just 1 layer +of Scala, and have it define its dependency graph/hierarchy by returning +values, rather than by calling global-side-effecting APIs. I've had trouble +trying to teach people how-to-bazel at work, and am pretty sure we can make +something that's easier to use. + +=== Scala.Rx + +Mill's "direct-style" applicative syntax is inspired by my old +https://github.com/lihaoyi/scala.rx[Scala.Rx] project. While there are +differences (Mill captures the dependency graph lexically using Macros, Scala.Rx +captures it at runtime), they are pretty similar. 
+ +The end-goal is the same: to write code in a "direct style" and have it +automatically "lifted" into a dependency graph, which you can introspect and use +for incremental updates at runtime. + +Scala.Rx is itself build upon the 2010 paper +https://infoscience.epfl.ch/record/148043/files/DeprecatingObserversTR2010.pdf[Deprecating the Observer Pattern]. + +=== CBT + +Mill looks a lot like https://github.com/cvogt/cbt[CBT]. The inheritance based +model for customizing ``Module``s/``ScalaModule``s comes straight from there, as +does the "command line path matches Scala selector path" idea. Most other things +are different though: the reified dependency graph, the execution model, the +caching module all follow Bazel more than they do CBT diff --git a/docs/modules/ROOT/pages/depth/execution-model.adoc b/docs/modules/ROOT/pages/depth/execution-model.adoc new file mode 100644 index 00000000000..fb67118b27f --- /dev/null +++ b/docs/modules/ROOT/pages/depth/execution-model.adoc @@ -0,0 +1,426 @@ += The Mill Execution Model +:page-aliases: The_Mill_Evaluation_Model.adoc, depth/evaluation-model.adoc + +include::partial$gtag-config.adoc[] + +This page does a deep dive on how Mill evaluates your build tasks, so you can better understand +what Mill is doing behind the scenes when building your project. 
+ +## Example Project + +For the purposes of this article, we will be using the following example build +as the basis for discussion: + +```scala +// build.mill +package build +import mill._, javalib._ + +object foo extends JavaModule {} + +object bar extends JavaModule { + def moduleDeps = Seq(foo) + + /** Total number of lines in module source files */ + def lineCount = Task { + allSourceFiles().map(f => os.read.lines(f.path).size).sum + } + + /** Generate resources using lineCount of sources */ + override def resources = Task { + os.write(Task.dest / "line-count.txt", "" + lineCount()) + Seq(PathRef(Task.dest)) + } +} +``` + +This is a simple two-module build with two ``JavaModule``s, one that depends on the other. +There is a custom task `bar.lineCount` implemented that replaces the default `resources/` +folder with a generated resource file for use at runtime, as a simple example of a +xref:javalib/intro.adoc#_custom_build_logic[Custom Build Logic]. + +This expects the source layout: + +``` +foo/ + src/ + *.java files + package.mill (optional) +bar/ + src/ + *.java files + package.mill (optional) +build.mill +``` + +You can operate on this build via commands such as + +```bash +> ./mill bar.compile + +> ./mill foo.run + +> ./mill _.assembly # evaluates both foo.compile and bar.compile +``` + + +For the purposes of this article, we will consider what happens when you run +`./mill _.assembly` on the above example codebase. + +## Primary Phases + +### Compilation + +Initial `.mill` build files: + +```bash +bar/ + package.mill #optional +foo/ + package.mill #optional +build.mill +``` + + +This stage involves compiling your `build.mill` and any +xref:large/multi-file-builds.adoc[subfolder package.mill files] into JVM classfiles. 
Resolution converts the Mill xref:cli/query-syntax.adoc[task selector] ``_.assembly`` given from the command line into the list of
xref:fundamentals/tasks.adoc[].
This can help keep task resolution fast even +when working within a large codebase by avoiding instantiation of modules that are +unrelated to the selector you are running. + + +### Planning + +Planning is the step of turning the tasks selected during <> into a full +build graph that includes all transitive upstream dependencies. This is done by +traversing the graph of task dependencies, and generates a (simplified) task graph +as shown below: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + newrank=true; + subgraph cluster_0 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "foo"; + + "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" + "foo.resources" -> "foo.assembly" + "foo.classPath" + } + subgraph cluster_1 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "bar"; + + + "bar.sources" -> "bar.compile" -> "bar.classPath" -> "bar.assembly" + + "bar.sources" -> "bar.lineCount" -> "bar.resources" -> "bar.assembly" + } + "foo.classPath" -> "bar.compile" [constraint=false] + "foo.classPath" -> "bar.classPath" +} +``` + +In this graph, we can see that even though <> only selected `foo.assembly` +and `bar.assembly`, their upstream task graph requires tasks such as `foo.compile`, +`bar.compile`, as well as our custom task `bar.lineCount` and our override of `bar.resources`. + + +### Evaluation + +The last phase is execution. Execution depends not only on the tasks you selected at the +command line, and those discovered during <>, but also what input files changed +on disk. Tasks that were not affected by input +changes may have their value loaded from cache (if already evaluated earlier) or skipped entirely +(e.g. due to xref:large/selective-execution.adoc[Selective Execution]). 
+ +For example, a change to `foo/src/*.java` would affect the `foo.sources` task, which +would invalidate and cause evaluation of the tasks highlighted in red below: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + newrank=true; + subgraph cluster_0 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "foo"; + + "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" [color=red, penwidth=2] + "foo.resources" -> "foo.assembly" + "foo.classPath" + "foo.sources" [color=red, penwidth=2] + + "foo.assembly" [color=red, penwidth=2] + "foo.compile" [color=red, penwidth=2] + "foo.classPath" [color=red, penwidth=2] + } + subgraph cluster_1 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "bar"; + + + "bar.sources" -> "bar.compile" -> "bar.classPath" + "bar.classPath" -> "bar.assembly" [color=red, penwidth=2] + + "bar.classPath" [color=red, penwidth=2] + "bar.assembly" [color=red, penwidth=2] + "bar.sources" -> "bar.lineCount" -> "bar.resources" -> "bar.assembly" + } + "foo.classPath" -> "bar.compile" [constraint=false] + "foo.classPath" -> "bar.classPath" [color=red, penwidth=2] +} +``` + +On the other hand a change to `bar/src/*.java` would affect the `bar.sources` task, which +would invalidate and cause evaluation of the tasks highlighted in red below: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + newrank=true; + subgraph cluster_0 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "foo"; + + "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" + "foo.resources" -> "foo.assembly" + "foo.classPath" + } + subgraph cluster_1 { + style=dashed + node [shape=box width=0 height=0 style=filled fillcolor=white] + label = "bar"; + + "bar.sources" -> "bar.compile" -> 
- `bar.compile` and `bar.classPath` can run on a separate thread from `bar.lineCount` and `bar.resources`
Mill modules are + simply a way to define and re-use parts of the task graph, but it is the task graph that matters + during evaluation + +## Bootstrapping + +One part of the Mill evaluation model that is skimmed over above is what happens before +*Compilation*: how does Mill actually get everything necessary to compile your `build.mill` +and `package.mill` files? This is called bootstrapping, and proceeds roughly in the following phases: + +1. Mill's xref:cli/installation-ide.adoc#_bootstrap_scripts[bootstrap script] first checks + if the right version of Mill is already present, and if not it downloads the assembly jar + to `~/.mill/download` + +2. Mill instantiates an in-memory `MillBuildRootModule.BootstrapModule`, + which is a hard-coded `build.mill` used for bootstrapping Mill + +3. If there is a xref:extending/meta-build.adoc[meta-build] present `mill-build/build.mill`, Mill processes that + first and uses the `MillBuildRootModule` returned for the next steps. + Otherwise it uses the `MillBuildRootModule.BootstrapModule` directly + +4. Mill evaluates the `MillBuildRootModule` to parse the `build.mill`, generate + a list of `ivyDeps` as well as appropriately wrapped Scala code that we can + compile, and compiles it to classfiles (<> above) + +For most users, you do not need to care about the details of the Mill bootstrapping +process, except to know that you only need a JVM installed to begin with and +Mill will download everything necessary from the standard Maven Central package repository +starting from just the bootstrap script (available as `./mill` for Linux/Mac and `./mill.bat` +for Windows). The documentation for xref:extending/meta-build.adoc[The Mill Meta Build] +goes into more detail of how you can configure and make use of it. + +== Consequences of the Mill Execution Model + +This four-phase evaluation model has consequences for how you structure your +build. For example: + +1. 
above counting the lines in `allSourceFiles` - and Mill handles all the caching,
+ + * If some of the files `build.mill` imported changed but not others, only the + changed files are re-compiled before the `RootModule` is re-instantiated + + * In the common case where `build.mill` was not changed at all, this step is + skipped entirely and the `RootModule` object simply re-used from the last + run. + +2. <>: + + * If the `RootModule` was re-used, then all + previously-instantiated modules are simply-re-used + + * Any modules that are lazily instantiated during <> are + also re-used. + +3. <> + + * Planning is relatively quick most of the time, and is not currently cached. + +4. <>: + + * ``Task``s are evaluated in dependency order + + * xref:fundamentals/tasks.adoc#_cached_tasks[Cached Task]s only re-evaluate if their input ``Task``s + change. + + * xref:fundamentals/tasks.adoc#_persistent_tasks[Persistent Tasks]s preserve the `Task.dest` + folder on disk between runs, allowing for finer-grained caching than Mill's default task-by-task + caching and invalidation + + * xref:fundamentals/tasks.adoc#_workers[Worker]s are kept in-memory between runs where possible, and only + invalidated if their input ``Task``s change as well. + + * ``Task``s in general are invalidated if the code they depend on changes, + at a method-level granularity via callgraph reachability analysis. See + https://github.com/com-lihaoyi/mill/pull/2417[#2417] for more details + +This approach to caching does assume a certain programming style inside your +Mill build: + +- Mill may-or-may-not instantiate the modules in your `build.mill` the first time + you run something (due to laziness) + +- Mill may-or-may-not *re*-instantiate the modules in your `build.mill` in subsequent runs + (due to caching) + +- Mill may-or-may-not re-execute any particular task depending on caching, + but your code needs to work either way. 
Your build code needs to work regardless of which order your tasks are executed in.
+At a high-level, a simplified version of the main components and data-flows within +a running Mill process is shown below: + +```graphviz +digraph G { + rankdir=LR + node [shape=box width=0 height=0 style=filled fillcolor=white] + bgcolor=transparent + + "client-stdin" [penwidth=0] + "client-stdout" [penwidth=0] + "client-stderr" [penwidth=0] + "client-exit" [penwidth=0] + "client-args" [penwidth=0] + subgraph cluster_client { + label = "mill client"; + "Socket" + "MillClientMain" + } + "client-stdin" -> "Socket" + "client-stderr" -> "Socket" [dir=back] + "client-stdout" -> "Socket" [dir=back] + "client-args" -> "MillClientMain" + "client-exit" -> "MillClientMain" [dir=back] + "MillClientMain" -> "runArgs" + subgraph cluster_out { + label = "out/"; + + + subgraph cluster_mill_server_folder { + label = "mill-server/"; + "socketPort" [penwidth=0] + "exitCode" [penwidth=0] + "runArgs" [penwidth=0] + } + subgraph cluster_out_foo_folder { + label = "foo/"; + "compile.json" [penwidth=0] + "compile.dest" [penwidth=0] + "assembly.json" [penwidth=0] + "assembly.dest" [penwidth=0] + + } + } + + + subgraph cluster_server { + label = "mill server"; + "PromptLogger" + "MillServerMain" + "Evaluator" + "ServerSocket" + + "server-stdout" [penwidth=0] + "server-stderr" [penwidth=0] + subgraph cluster_classloder { + label = "URLClassLoader"; + subgraph cluster_build { + style=dashed + label = "build"; + subgraph cluster_foo { + style=dashed + label = "foo"; + + "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" + "foo.resources" -> "foo.assembly" + "foo.classPath" + } + } + + } + } + + + "runArgs" -> "MillServerMain" + "MillServerMain" -> "Evaluator" [dir=both] + "ServerSocket" -> "PromptLogger" [dir=back] + "exitCode" -> "MillServerMain" [dir=back] + "MillClientMain" -> "exitCode" [dir=back] + "Socket" -> "socketPort" [dir=both] + "socketPort" -> "ServerSocket" [dir=both] + + "PromptLogger" -> "server-stderr" [dir=back] + "PromptLogger" -> "server-stdout" 
Mill server such shutdowns and restarts should be mostly invisible to the user.
the various xref:fundamentals/modules.adoc[] and xref:fundamentals/tasks.adoc[]
Each task's files are not touched by any other tasks, nor are they used in the rest +of the Mill architecture: they are solely meant to serve each task's caching and filesystem +needs. + +More documentation on what the `out/` directory contains and how to make use of it can be +found at xref:fundamentals/out-dir.adoc[]. diff --git a/docs/modules/ROOT/pages/depth/sandboxing.adoc b/docs/modules/ROOT/pages/depth/sandboxing.adoc new file mode 100644 index 00000000000..118aaf01ddc --- /dev/null +++ b/docs/modules/ROOT/pages/depth/sandboxing.adoc @@ -0,0 +1,27 @@ += Mill Sandboxing +:page-aliases: Mill_Sandboxing.adoc + +include::partial$gtag-config.adoc[] + +== Task Sandboxing + +include::partial$example/depth/sandbox/1-task.adoc[] + +== Test Sandboxing + +include::partial$example/depth/sandbox/2-test.adoc[] + +== Breaking Out Of Sandbox Folders + +include::partial$example/depth/sandbox/3-breaking.adoc[] + +== Limitations + +Mill's approach to filesystem sandboxing is designed to avoid accidental interference +between different Mill tasks. It is not designed to block intentional misbehavior, and +tasks are always able to traverse the filesystem and do whatever they want. Furthermore, +Mill's redirection of `os.pwd` does not apply to `java.io` or `java.nio` APIs, which are +outside of Mill's control. + +However, by setting `os.pwd` to safe sandbox folders, we hope to minimize the cases where +someone accidentally causes issues with their build by doing the wrong thing. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/depth/why-scala.adoc b/docs/modules/ROOT/pages/depth/why-scala.adoc new file mode 100644 index 00000000000..3f880088be1 --- /dev/null +++ b/docs/modules/ROOT/pages/depth/why-scala.adoc @@ -0,0 +1,286 @@ += Why does Mill use Scala? + +include::partial$gtag-config.adoc[] + +One question that comes up a lot among Mill users is why use Scala as the language +to configure your build? 
Why not YAML, XML, TOML, Bash, Groovy, Python, Java, or any of the +other hundred programming and configuration languages in widespread use today? Scala +is definitely a niche language, but it also has some unique properties that make it +especially suitable to be used for configuring the build system of a small or large project. + +For the purposes of this page, we will break down this topic into three top-level questions: why +Mill uses a _general-purpose programming language_, why Mill uses +the _Scala_ Language, and why Mill wants to run on the _Java Virtual Machine_ + +== Why a General Purpose Language? + +While Mill uses a general-purpose programming language (Scala), many build tools use +restricted config languages instead, or their own custom tool-specific languages. Why +that is the case is an interesting discussion. + +=== Why Not Config Languages? + +Many build tools use restricted config languages rather than a general-purpose language: + +|=== +| Language | Tool +| https://en.wikipedia.org/wiki/XML[XML] | https://maven.apache.org/[Maven], https://en.wikipedia.org/wiki/MSBuild[MSBuild], https://ant.apache.org/[Ant] +| https://toml.io/en/[TOML] | https://packaging.python.org/en/latest/guides/writing-pyproject-toml/[pyproject.toml], https://doc.rust-lang.org/cargo/guide/[Cargo], https://python-poetry.org/[Poetry] +| https://en.wikipedia.org/wiki/JSON[JSON] | https://docs.npmjs.com/cli/v10/configuring-npm/package-json/[NPM] +| https://en.wikipedia.org/wiki/YAML[YAML] | https://bleep.build/docs/[Bleep] +|=== + +At a first glance, using a restricted language is tempting: restricted languages _are_ +simpler than general purpose languages, with less complexity. However, build systems +are often fundamentally complex systems, especially as codebases or organizations grow. 
   in a general-purpose language to use in your XML/TOML/JSON file, or write your own plugin
   if none of the off-the-shelf plugins exactly match your requirements (which is very likely!)
companies and in various codebases. The fundamental issue is that _build systems are inherently
complex_, and if your build tool _language_ does not accommodate that complexity, the complexity
ends up leaking elsewhere outside the purview of the build tool.
A custom tool-specific language implemented in a few person-months will definitely be
much less polished in all these areas than a widely-used general-purpose language that
has been gradually improved upon for a decade or two.
Given that, Scala fits right in: it is neither too verbose (like Java/C++/XML), nor is
it as terse as the syntaxes of Bash or Make. Mill's bundled libraries like
https://github.com/com-lihaoyi/requests-scala[Requests-Scala] or
https://github.com/com-lihaoyi/os-lib[OS-Lib] would not look out of place in any
Python or Ruby codebase.
It is in this +context that Mill's static typing really shines: what such "perpetual beginners" need +most is help understanding/navigating the build logic, and help checking their +proposed changes for dumb mistakes. And there will be dumb mistakes, because most +people are not and will never be build-tool experts or enthusiasts + +To that end, Mill's static typing gives it a big advantage here. It's IDE support +is much better xref:comparisons/maven.adoc[compared to Maven] or +xref:comparisons/maven.adoc[compared to Gradle], and that is largely due to the +way Scala's static types give the IDE more to work with than more dynamic languages. +And, while Scala's static typing won't catch every subtle bug, it does do a good job +at catching the dumb bugs that non-experts will typically make when configuring their +build system + +Almost every programming language these days is statically typed to some degree, +Python has https://github.com/python/mypy[MyPy], Ruby has https://sorbet.org/[Sorbet], +Javascript has https://www.typescriptlang.org/[TypeScript], and so on. But +Scala has static typing built deeply into the core of the language, and so it works +more smoothly than other languages which have static typing bolted on after-the-fact: +The syntax is slicker, the IDEs work better, the error reporting is friendlier. +And that's why Scala's static typing really shines when used in Mill builds even +for non-experts with no prior background in Scala. 
+ +=== Functional and Object-Oriented Features + +Scala is perhaps the language that sits most squarely on the fence between functional +and object-oriented programming: + +* It provides functional features from basic first-class functions immutability, + all the way to more advanced techniques like Typeclasses + +* It also provides object oriented features, again from basic classes and overrides + to more advanced mixin trait composition and implicit conversions + +Mill makes heavy use of both the functional and object-oriented features of the Scala +language. As discussed in the section on xref:depth/design-principles.adoc[Mill Design Principles], +Mill models the _build graph_ using the functional call graph of your methods, +while Mill models the _module hierarchy_ using the object graph of your modules. And +this is not just a superficial resemblance, but the semantics deeply match what you would +expect in a hybrid functional/object-oriented program: Mill supports instantiating modules, +subclasses, inheritance via `extends`, `override`, `super`, +and so on. + +While these are non-trivial semantics, they are semantics that should be immediately +familiar to anyone who has ever passed programming 101 in college. You already _know_ +how `override` works or how `super` works in Mill, even if nobody told you! This approach +of "making your build code feel just like your application code" is the key to Mill's +approachability to people from varying backgrounds, and to allow the "perpetual non-experts" +typically modifying a build system to do so in a familiar and intuitive manner even if +they know nothing about the Scala language. + + +== Why the JVM Runtime? + +=== Dynamic Classloading + +One often-under-appreciated facet of the Java Virtual Machine is its ability to do dynamic +classloading. 
This functionality is largely irrelevant in the backend-service space that Java +is often used in (where the entire codebase is present during deployment), and has largely +failed as a mechanism for running un-trusted potentially-malicious code in a safe sandbox +(see https://en.wikipedia.org/wiki/Java_applet[Applets]). + +However, in the case of a build system, the need is different: you need to dynamically build, +load, and run a wide variety of mostly-trusted code. Most build systems do not provide any +hard security boundaries, and assume the code you get from your source control system is +not malicious. But build systems need to be pluggable, with the same build system +potentially being used to manage a wide variety of different tools and frameworks. + +It is in this context that the JVM's dynamic classloading shines, and Mill goes all in +dynamic classloading. Features like xref:extending/import-ivy-plugins.adoc[import $ivy], +xref:extending/running-jvm-code.adoc[Running Dynamic JVM Code], or the +xref:extending/meta-build.adoc[Mill Meta-Build] would be difficult-to-impossible in +less-dynamic platforms like Go, Swift, Rust, or C++. Mill simultaneously takes advantage of +the Scala language's xref:#_static_typing[Static Typing], while also leaning heavily on the +JVM's dynamic nature: it uses classloader hierarchies, dynamic class loading and unloading, +isolated and partially-isolated classloaders, bytecode instrumentation, the whole works. +It wouldn't be a stretch to say that a build tool like Mill could not be written on top of +any other platform than the JVM it runs on today. + +=== Huge JVM Tooling Ecosystem + +The JVM ecosystem is huge, not just for the Java language but also things like Kotlin, Scala, +Android, and so on. IDEs, debuggers, profilers, heap analyzers, if a software tool exists +you can bet there is an equivalent or integration with the JVM ecosystem. 
+ +From the perspective of IDE support, Mill is able to get (almost) full support for understanding +and navigating its `build.mill` files, basically for free: IntelliJ already has deep support +for understanding JVM code, classfiles, classpaths, the Scala language itself, and so on. +VSCode also works pretty well out-of-the-box with minimal modifications. + +Apart from the IDE, the Java ecosystem has perhaps some of the best tooling available of +any programming ecosystem, both free and proprietary, and Mill makes heavy use of it. If +a build is stuck, you can use `jstack` to see what it is doing. If a build is slow or running +out of memory, you can hook it up to https://www.ej-technologies.com/jprofiler[JProfiler] +or https://www.yourkit.com/[Yourkit] to see what is taking up space. + +Lastly there is the wealth of libraries: if something has a programming language integration, +there probably is one for Java, and Mill can make use of any Java libraries seamlessly +as part of the build using xref:extending/import-ivy-plugins.adoc[import $ivy] or +xref:extending/running-jvm-code.adoc[dynamic classloading]. With Mill, the ability to +directly import _any JVM artifact on the planet_ without needing a purpose-built plugin +open ups an enormous about of possibilities: anything that can be done in the Java ecosystem +can be done as part of your Mill build with a +single xref:extending/import-ivy-plugins.adoc[import $ivy]. + +=== Built-in Publishing Infrastructure + +The last major benefit Mill gets from running on the JVM is the publishing infrastructure: +primarily Sonatype's https://central.sonatype.com/[Maven Central]. Mill has a rich and +constantly growing set of xref:extending/thirdparty-plugins.adoc[Third-Party Plugins] that +are published on Maven Central for people to use, and anyone can easily +xref:extending/writing-plugins.adoc[write and publish their own]. 
While Maven Central isn't +perfect, it does a solid job as a package repository: hosting an enormous catalog of +artifacts for the Java community to build upon, with nice properties such as +https://central.sonatype.org/register/namespace/[namespacing], +https://search.maven.org/[discoverability], +https://central.sonatype.org/publish/requirements/immutability/[immutability], +and https://central.sonatype.org/publish/requirements/#sign-files-with-gpgpgp[code signing]. +Apart from Maven Central itself, there is a wealth of other hosted or self-hosted JVM +package repositories available for you to choose. + +Mill makes heavy use of Maven Central and the rest of the Java publishing infrastructure: +Mill's own artifacts are all published on Maven Central, Mill builds can resolve any +artifact from Maven Central to use in your build, and anyone can publish their own plugins +to Maven Central for free. it is easy to +xref:javalib/dependencies.adoc#_repository_config[configure alternate repositories], +and Mill provides a wealth of xref:fundamentals/library-deps.adoc[tools and techniques for +working with JVM dependencies]. + +Most build tools end up with some half-baked plugin distribution model: downloading source +code off of Github, ad-hoc package formats or zip files, published artifacts that can be +sneakily changed or even deleted after the fact, and so on. Mill instead relies on +the widely-used publishing and distribution system that every JVM project already uses, +providing a predictable and well-designed publishing and artifact distribution experience +far beyond what can be provided by most other build tools. 
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/extending/contrib-plugins.adoc b/docs/modules/ROOT/pages/extending/contrib-plugins.adoc new file mode 100644 index 00000000000..f412f76bb0b --- /dev/null +++ b/docs/modules/ROOT/pages/extending/contrib-plugins.adoc @@ -0,0 +1,58 @@ += Contrib Plugins +:page-aliases: Contrib_Plugins.adoc, Contrib_Modules.adoc + +include::partial$gtag-config.adoc[] + +The ((plugins)) in this section are hosted in the Mill git tree and developed / maintained by the community. + +For details about including plugins in your `build.mill` read xref:extending/import-ivy-plugins.adoc[Using Mill Plugins]. + +[CAUTION] +-- +When using one of these contribution modules, it is important that the versions you load match your mill version. +To facilitate this, Mill will automatically replace the `$MILL_VERSION` literal in your ivy imports with the correct value. +You can also leave the version completely empty to default to the mill version (but don't forget to keep the trailing colon). 
+ +For instance: + +[source,scala] +---- +import $ivy.`com.lihaoyi::mill-contrib-bloop:$MILL_VERSION` +---- + +or + +[source,scala] +---- +import $ivy.`com.lihaoyi::mill-contrib-bloop:` +---- + +-- + +== Importing Contrib Modules + +include::partial$example/extending/imports/3-contrib-import.adoc[] + + +== List of Contrib Plugins + +// See also the list in nav.adoc +* xref:contrib/artifactory.adoc[] +* xref:contrib/bintray.adoc[] +* xref:contrib/bloop.adoc[] +* xref:contrib/buildinfo.adoc[] +* xref:contrib/codeartifact.adoc[] +* xref:contrib/docker.adoc[] +* xref:contrib/flyway.adoc[] +* xref:contrib/gitlab.adoc[] +* xref:contrib/jmh.adoc[] +* xref:contrib/playlib.adoc[] +* xref:contrib/proguard.adoc[] +* xref:contrib/scalapblib.adoc[] +* xref:contrib/scoverage.adoc[] +* xref:contrib/sonatypecentral.adoc[] +* xref:contrib/testng.adoc[] +* xref:contrib/twirllib.adoc[] +* xref:contrib/versionfile.adoc[] + + diff --git a/docs/modules/ROOT/pages/extending/example-python-support.adoc b/docs/modules/ROOT/pages/extending/example-python-support.adoc new file mode 100644 index 00000000000..319929a5df4 --- /dev/null +++ b/docs/modules/ROOT/pages/extending/example-python-support.adoc @@ -0,0 +1,32 @@ += Example: Python Support + +include::partial$gtag-config.adoc[] + +This section demonstrates how to integrate `Python` support into `Mill`. +We will define a simple `PythonModule` trait that can resolve dependencies, +perform type checking on local code, and bundle an executable. + +NOTE: This integration is for educational purposes only, showcasing common technique +used in building language toolchains, and is not intended for production use. 
+ +== Basic Python Build Pipeline + +include::partial$example/extending/python/1-hello-python.adoc[] + +== Re-usable PythonModule + +include::partial$example/extending/python/2-python-modules.adoc[] + +== PythonModule `moduleDeps` + +include::partial$example/extending/python/3-python-module-deps.adoc[] + +== PIP dependencies and bundling + +include::partial$example/extending/python/4-python-libs-bundle.adoc[] + + + +As mentioned, The `PythonModule` examples here demonstrate +how to add support for a new language toolchain in Mill. +A production-ready version would require more work to enhance features and performance. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/extending/example-typescript-support.adoc b/docs/modules/ROOT/pages/extending/example-typescript-support.adoc new file mode 100644 index 00000000000..af3b7c8544f --- /dev/null +++ b/docs/modules/ROOT/pages/extending/example-typescript-support.adoc @@ -0,0 +1,44 @@ += Example: Typescript Support + +include::partial$gtag-config.adoc[] + +This section walks through the process of adding support for a new programming +language to Mill. We will be adding a small `trait TypeScriptModule` with the +ability to resolve dependencies, typecheck local code, and optimize a final +bundle. + +The TypeScript integration here is not intended for production usage, but is +instead meant for illustration purposes of the techniques typically used in +implementing language toolchains. 
+ +== Basic TypeScript Build Pipeline + +include::partial$example/extending/typescript/1-hello-typescript.adoc[] + +== Re-usable TypeScriptModule + +include::partial$example/extending/typescript/2-typescript-modules.adoc[] + +== TypeScriptModule `moduleDeps` + +include::partial$example/extending/typescript/3-module-deps.adoc[] + +== NPM dependencies and bundling + +include::partial$example/extending/typescript/4-npm-deps-bundle.adoc[] + + + + +As mentioned earlier, the `TypeScriptModule` examples on this page are meant for +demo purposes: to show what it looks like to add support in Mill for a new +programming language toolchain. It would take significantly more work to flesh out +the featureset and performance of `TypeScriptModule` to be usable in a real world +build. But this should be enough to get you started working with Mill to add support +to any language you need: whether it's TypeScript or some other language, most programming +language toolchains have similar concepts of `compile`, `run`, `bundle`, etc. + + +As mentioned, The `PythonModule` examples here demonstrate +how to add support for a new language toolchain in Mill. +A production-ready version would require more work to enhance features and performance. \ No newline at end of file diff --git a/docs/modules/ROOT/pages/extending/import-ivy-plugins.adoc b/docs/modules/ROOT/pages/extending/import-ivy-plugins.adoc new file mode 100644 index 00000000000..dfc7eb838d2 --- /dev/null +++ b/docs/modules/ROOT/pages/extending/import-ivy-plugins.adoc @@ -0,0 +1,100 @@ += Import Libraries and Plugins +:page-aliases: Import_File_And_Import_Ivy.adoc, Using_Plugins.adoc + +include::partial$gtag-config.adoc[] + +This page illustrates usage of `import $ivy`. +`import $ivy` lets you import JVM dependencies into your `build.mill`, so +you can use arbitrary third-party libraries at build-time. This makes +lets you perform computations at build-time rather than run-time, +speeding up your application start up. 
Unlike most other build tools that +require purpose-built plugins to extend them, Mill's `import $ivy` can be used to pull +in any JVM library on Maven Central to use in your custom tasks, with +xref:extending/thirdparty-plugins.adoc[Third-Party Plugins] only necessary for +more sophisticated integrations. + + +== Importing Java Libraries + +include::partial$example/extending/imports/1-import-ivy.adoc[] + + +== Importing Scala Libraries + +include::partial$example/extending/imports/2-import-ivy-scala.adoc[] + +== Importing Plugins + +Mill plugins are ordinary JVM libraries jars and are loaded as any other external dependency with +the `import $ivy` mechanism. + +There exist a large number of Mill plugins, Many of them are available on GitHub and via +Maven Central. We also have a list of plugins, which is most likely not complete, but it +might be a good start if you are looking for plugins: xref:Thirdparty_Plugins.adoc[]. + +Some plugin contributions are also hosted in Mill's own git tree as xref:Contrib_Plugins.adoc[]. + +Mill plugins are typically bound to a specific version range of Mill. +This is called the binary platform. To ease the use of the correct versions and avoid runtime +issues (caused by binary incompatible plugins, which are hard to debug) you can apply one of the +following techniques: + +=== Use the specific Mill Binary Platform notation + +[source,scala] +---- +// for classic Scala dependencies +import $ivy.`::::` // <1> +// for dependencies specific to the exact Scala version +import $ivy.`:::::` // <2> +---- +<1> This is equivalent to ++ +[source,scala] +---- +import $ivy.`::_mill$MILL_BIN_PLATFORM:` +---- +<2> This is equivalent to ++ +[source,scala] +---- +import $ivy.`:::_mill$MILL_BIN_PLATFORM:` +---- + + +=== Use special placeholders in your `import $ivy` + +`$MILL_VERSION` :: ++ +-- +to substitute the currently used Mill version. 
+This is typical required for Mill contrib modules, which are developed in the Mill repository and highly bound to the current Mill version. + +.Example: Use `mill-contrib-bloop` plugin matching the current Mill version +---- +import $ivy.`com.lihaoyi:mill-contrib-bloop:$MILL_VERSION` +---- + +There is the even more convenient option to leave the version completely empty. +Mill will substitute it with its current version. +But don't forget to provide the trailing colon! + +.Example: Use `mill-contrib-bloop` plugin matching the current Mill version +---- +import $ivy.`com.lihaoyi:mill-contrib-bloop:` +---- +-- + +`$MILL_BIN_PLATFORM` :: ++ +-- +to substitute the currently used Mill binary platform. + +.Example: Using `mill-vcs-version` plugin matching the current Mill Binary Platform +---- +import $ivy.`de.tototec::de.tobiasroeser.mill.vcs.version_mill$MILL_BIN_PLATFORM:0.1.2` +---- +-- + +TIP: If you want to publish re-usable libraries that _other_ people can use in their builds, simply publish your code as a library to maven central. + diff --git a/docs/modules/ROOT/pages/extending/meta-build.adoc b/docs/modules/ROOT/pages/extending/meta-build.adoc new file mode 100644 index 00000000000..66a3e0045e9 --- /dev/null +++ b/docs/modules/ROOT/pages/extending/meta-build.adoc @@ -0,0 +1,54 @@ += The Mill Meta-Build +:page-aliases: The_Mill_Meta_Build.adoc + +include::partial$gtag-config.adoc[] + +The meta-build manages the compilation of the `build.mill`. +Customizing the meta-build gives you greater control over how exactly your +`build.mill` evaluates. + + +To customize it, you need to explicitly enable it with `import $meta._`. +Once enabled, the meta-build lives in the `mill-build/` directory. +It needs to contain a top-level module of type `MillBuildRootModule`. + +If you don't configure it explicitly, a built-in synthetic meta-build is used. +Meta-builds are recursive, which means, it can itself have a nested meta-builds, and so on. 
+ +To run a task on a meta-build, you specify the `--meta-level` option to select +the meta-build level. + +== Autoformatting the `build.mill` + +As an example of running a task on the meta-build, you can format the `build.mill` with Scalafmt. +Everything is already provided by Mill. +You only need a `.scalafmt.conf` config file which at least needs configure the Scalafmt version. + +.Run Scalafmt on the `build.mill` (and potentially included files) +---- +$ mill --meta-level 1 mill.scalalib.scalafmt.ScalafmtModule/ +---- + +* `--meta-level 1` selects the first meta-build. Without any customization, this is the only built-in meta-build. +* `mill.scalalib.scalafmt.ScalafmtModule/reformatAll` is a generic task to format scala source files with Scalafmt. It requires the tasks that refer to the source files as argument +* `sources` this selects the `sources` tasks of the meta-build, which at least contains the `build.mill`. + +== Finding plugin updates + +Mill plugins are defined as `ivyDeps` in the meta-build. +Hence, you can easily search for updates with the external `mill.scalalib.Dependency` module. 
+ +.Check for Mill Plugin updates +---- +$ mill --meta-level 1 mill.scalalib.Dependency/showUpdates +Found 1 dependency update for +de.tototec:de.tobiasroeser.mill.vcs.version_mill0.11_2.13 : 0.3.1-> 0.4.0 +---- + +== Sharing Libraries between `build.mill` and Application Code + +include::partial$example/extending/metabuild/4-meta-build.adoc[] + +== Sharing Source Code between `build.mill` and Application Code + +include::partial$example/extending/metabuild/5-meta-shared-sources.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/extending/running-jvm-code.adoc b/docs/modules/ROOT/pages/extending/running-jvm-code.adoc new file mode 100644 index 00000000000..9a02338c99c --- /dev/null +++ b/docs/modules/ROOT/pages/extending/running-jvm-code.adoc @@ -0,0 +1,51 @@ += Running Dynamic JVM Code + +include::partial$gtag-config.adoc[] + +While xref:extending/import-ivy-plugins.adoc[import $ivy] is convenient, +it comes with limitations as the JVM library it imports is global to your build: + +1. The library has to be resolved and downloaded before any part of your build starts. + If your codebase is large and most parts of your build don't use that library, + needing to download the library when working on parts that don't need it can be wasteful + +2. The library can only have one version across the entire build. This can be an issue if + you need to have multiple versions of the library used in different parts of your build. + e.g. different parts of a large Groovy codebase may use different versions of the Groovy + interpreter, and so the Groovy interpreter cannot be included via `import $ivy` because the + different versions would collide. + +3. The library cannot be built as part of your main build. While it is possible to build + it as part of your xref:extending/meta-build.adoc[Meta-Build], that comes with additional + complexity and limitations. 
In a large codebase, you often end up building modules that + are shared between production deployments as well as local tooling: in such cases + `import $ivy` is not a good fit + + +In scenarios where these limitations cause issues, Mill provides other ways to run arbitrary +JVM code apart from `import $ivy`. + + +== Subprocesses + +include::partial$example/extending/jvmcode/1-subprocess.adoc[] + +== In-process Isolated Classloaders + +include::partial$example/extending/jvmcode/2-classloader.adoc[] + +== Classloader Worker Tasks + +include::partial$example/extending/jvmcode/3-worker.adoc[] + +== Running a ScalaModule in a Subprocess + +include::partial$example/extending/jvmcode/4-module-run-task.adoc[] + +== Running a JavaModule in a Classloader + +include::partial$example/extending/jvmcode/5-module-classloader.adoc[] + +== Caching and Re-using JVM subprocesses and classloaders + +include::partial$example/extending/jvmcode/6-module-cached-classloader.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/extending/thirdparty-plugins.adoc b/docs/modules/ROOT/pages/extending/thirdparty-plugins.adoc new file mode 100644 index 00000000000..8502da9032f --- /dev/null +++ b/docs/modules/ROOT/pages/extending/thirdparty-plugins.adoc @@ -0,0 +1,1143 @@ += Third-Party Plugins +:page-aliases: Thirdparty_Modules.adoc, Thirdparty_Plugins.adoc + +include::partial$gtag-config.adoc[] + +The Plugins in this section are developed/maintained outside the mill git tree. +This list is most likely not complete. +If you wrote a Mill plugin or find that one is missing in this list, please open a {mill-github-url}/pulls[pull request] and add that plugin with a short description (in alphabetical order). + +For details about including plugins in your `build.mill` read xref:extending/import-ivy-plugins.adoc[Using Mill Plugins]. + +CAUTION: Besides the documentation provided here, we urge you to consult the respective linked plugin documentation pages. 
+The usage examples given here are most probably incomplete and sometimes outdated! + +Additional to this list, you can also search the https://github.com/topics/mill-plugin[`mill-plugin` topic on GitHub] for more plugins. + + +== Aliases + +This plugin adds an opinionated way of defining aliases to Mill builds. Added aliases are global and applied to the whole build. + +Project home: https://github.com/carlosedp/mill-aliases + +[source,scala] +---- +import mill._, scalalib._ +import $ivy.`com.carlosedp::mill-aliases::0.2.1` +import com.carlosedp.aliases._ + +object foo extends ScalaModule { + ... +} + +object MyAliases extends Aliases { + def testall = alias("__.test") + def compileall = alias("__.compile") + def comptestall = alias("__.compile", "__.test") +} +---- + +To show all the defined aliases: + +```sh +./mill Alias/list +``` + +Run an alias: + +```sh +./mill Alias/run testall +``` + +When run, each aliased task is checked if valid. + +== Antlr + +https://www.antlr.org/[ANTLR parser generator] support for mill. + +Project home: https://github.com/ml86/mill-antlr + + +[source,scala] +---- +import $ivy.`net.mlbox::mill-antlr:0.1.0` +import net.mlbox.millantlr.AntlrModule + +object foo extends ScalaModule with AntlrModule { + override def antlrGrammarSources = Task.Sources { + Seq(os.pwd/"someGrammar.g4").map(PathRef(_)) + } +} +---- + +== AspectJ + +https://projects.eclipse.org/projects/tools.aspectj[AspectJ compiler] support for mill. + +Project home: https://github.com/lefou/mill-aspectj + + +[source,scala] +---- +import mill._ +import mill.scalalib._ +import mill.define._ + +// Load the plugin from Maven Central via ivy/coursier +import $ivy.`de.tototec::de.tobiasroeser.mill.aspectj_mill0.9:0.3.1-12-89db01 +import de.tobiasroeser.mill.aspectj._ + +object main extends AspectjModule { + + // Select the AspectJ version + def aspectjVersion = "1.9.5" + + // Set AspectJ options, e.g. 
the language level and annotation processor + // Run `mill main.ajcHelp` to get a list of supported options + def ajcOptions = Seq("-8", "-proc:none") + +} +---- + +For documentation, please refer to the https://github.com/lefou/mill-aspectj[project home page]. + +== Bash Completion + +Limited bash completion support. + +Project home: https://github.com/lefou/mill-bash-completion + +== Bundler + +`mill-bundler` is comparable to `scalajs-bundler` for SBT: It manages NPM dependencies for a Scala.js module and +bundling it. Currently Webpack and Rollup are implemented but it's easy to use another one. + +Project home: https://github.com/nafg/mill-bundler + + +== CI Release + +`mill-ci-release` is a wrapper around the existing publish functionality of +Mill with the aim to making releasing your project in GitHub Actions to Maven +easier by automating common setup such as setting up gpg in CI, setting up +versioning, and ensuring merges to into your main branch get published as a +SNAPSHOT. If you're coming from sbt, then you're likely familiar with +https://github.com/sbt/sbt-ci-release[`sbt-ci-release`] which this plugin +imitates. + +Project home: https://github.com/ckipp01/mill-ci-release + + +To get started, you'll want to use `CiReleaseModule` as a drop in replacement +where you'd normally use the Mill `PublishModule` and then ensure you implement +everything that `PublishModule` requires. + +Secondly, you'll need to ensure you have a few environment variables correctly +set in your GitHub repo. You can see detailed instructions on which are +necessary https://github.com/ckipp01/mill-ci-release#secrets[here]. + +Then in CI to publish you'll simply issue a single command: + +[source,yaml] +---- +- run: mill -i io.kipp.mill.ci.release.ReleaseModule/publishAll +---- + +This will automatically grab all the artifacts that you've defined to publish +in your build and publish them. 
Your version will automatically be managed by +https://github.com/lefou/mill-vcs-version[`mill-vcs-version`] and if your +version ends in `-SNAPSHOT` you're project will be published to Sonatype +Snapshots or to the normal releases if it's a new tag. + +== Daemon + +Use mill as a launcher for self-building systemd daemons, +convenient for handling of code-as-config, or quick editing and rebuilding +of code-generating templates. + +Project home: https://github.com/swaldman/mill-daemon + +Place the millw script from https://github.com/lefou/millw in your project directory. + +./opt/coolproj/build.mill +---- +import $ivy.`com.mchange::mill-daemon:0.0.1` +import com.mchange.milldaemon.DaemonModule + +object coolproj extends RootModule with DaemonModule { + override def runDaemonPidFile = Some( os.pwd / "coolproj.pid" ) +} +---- + +./opt/coolproj/rebuild-and-start +---- +#!/bin.bash + +./millw runMainDaemon coolproj.Main "$@" +---- + +./opt/coolproj/coolproj.service +---- +[Unit] +Description=Cool Project +After=syslog.target network.target + +[Service] +Type=forking +PIDFile=/opt/coolproj/coolproj.pid +User=coolproj +Group=coolproj +WorkingDirectory=/opt/coolproj +ExecStart=/opt/coolproj/rebuild-and-start +Restart=on-failure + +[Install] +WantedBy=multi-user.target +---- + +Symlink `/opt/coolproj/coolproj.service` from `/etc/systemd/system`, then `systemctl restart coolproj`. + +== DGraph + +Show transitive dependencies of your build in your browser. + +Project home: https://github.com/ajrnz/mill-dgraph + + +[source,scala] +---- +import $ivy.`com.github.ajrnz::mill-dgraph:0.2.0` +---- + +[source,sh] +---- +sh> mill plugin.dgraph.browseDeps(proj)() +---- + +== Docker Jib Packager +A wrapper around the https://github.com/GoogleContainerTools/jib[Google Jib Library]. +Allows to build Docker images without a running Docker daemon. +Also enables to layer an image more efficiently. 
+ +Project home: https://github.com/GeorgOfenbeck/mill-docker + + +== Docker Native-Image Packager + +This plugin allows building Docker container images with GraalVM Native-Image +binaries for cloud-native and fast-startup applications. + +Project home: https://github.com/carlosedp/mill-docker-nativeimage + +Import the plugin, extend your module with `DockerNative` and configure the +parameters for your application using the `DockerNativeConfig` trait in the +`dockerNative` object. + +[source,scala] +---- +import mill._, mill.scalalib._, mill.scalalib.scalafmt._ +import $ivy.`com.carlosedp::mill-docker-nativeimage::0.6.0` +import com.carlosedp.milldockernative.DockerNative + +object hello extends ScalaModule with DockerNative { + def scalaVersion = "3.3.0" + object dockerNative extends DockerNativeConfig { + // Native Image parameters + def nativeImageName = "hello" + def nativeImageGraalVmJvmId = T("graalvm-java17:22.3.2") + def nativeImageClassPath = runClasspath() + def nativeImageMainClass = "com.domain.Hello.Hello" + // GraalVM parameters depending on your application needs + def nativeImageOptions = Seq( + "--no-fallback", + "--enable-url-protocols=http,https", + "-Djdk.http.auth.tunneling.disabledSchemes=", + ) ++ (if (sys.props.get("os.name").contains("Linux")) Seq("--static") else Seq.empty) + + // Generated Docker image parameters + def baseImage = "ubuntu:22.04" + def tags = List("docker.io/myuser/helloapp") + def exposedPorts = Seq(8080) + } +} +---- + +[source,sh] +---- +./mill hello.dockerNative.build() +# Test run +docker run -it --rm docker.io/myuser/helloapp + +# Push to a registry +./mill hello.dockerNative.push +---- + +For more details and configuration options, please refer to the project readme +and also check the provided example code. + +== Docusaurus 2 + +Simple Docusaurus runner for Mill + +The plugin provides a mill module that allows to build the project web site using https://docusaurus.io/[Docusaurus 2] as a static content generator. 
Project home: https://github.com/atooni/mill-docusaurus2
+ +Project home: https://github.com/ckipp01/mill-giter8 + + +.`build.mill` +[source,scala] +---- +import $ivy.`io.chris-kipp::mill-giter8::0.2.0` + +import io.kipp.mill.giter8.G8Module + +object g8 extends G8Module { + override def validationTargets = + Seq("example.compile", "example.fix", "example.reformat") +} +---- + +The most common task you'd then use is `mill g8.validate`. + +== Git + +A git version plugin for mill. + +Project home: https://github.com/joan38/mill-git + +_build.mill_: + +[source,scala] +---- +import $ivy.`com.goyeau::mill-git:` +import com.goyeau.mill.git.GitVersionedPublishModule +import mill.scalalib.JavaModule +import mill.scalalib.publish.{Developer, License, PomSettings, VersionControl} + +object `jvm-project` extends JavaModule with GitVersionedPublishModule { + override def pomSettings = PomSettings( + description = "JVM Project", + organization = "com.goyeau", + url = "https://github.com/joan38/mill-git", + licenses = Seq(License.MIT), + versionControl = VersionControl.github("joan38", "mill-git"), + developers = Seq(Developer("joan38", "Joan Goyeau", "https://github.com/joan38")) + ) +} +---- + +[source,shell script] +---- +> mill show jvm-project.publishVersion +[1/1] show +[2/2] com.goyeau.mill.git.GitVersionModule.version +"0.0.0-470-6d0b3d9" +---- + +== GitHub Dependency Graph Submission + +A plugin to submit your mill dependency graph to GitHub through their +https://github.blog/2022-06-17-creating-comprehensive-dependency-graph-build-time-detection/[Dependency +Submission API]. + +Project home: https://github.com/ckipp01/mill-github-dependency-graph + + +The easiest way to use this plugin is with the +https://github.com/ckipp01/mill-dependency-submission[mill-dependency-submission] +action. 
You can add it as a workflow: + +[source,yaml] +---- +name: github-dependency-graph + +on: + push: + branches: + - main + +jobs: + submit-dependency-graph: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: coursier/cache-action@v6 + - uses: actions/setup-java@v3 + with: + distribution: 'temurin' + java-version: '17' + - uses: ckipp01/mill-dependency-submission@v1 +---- + + +== Hepek + +`mill-hepek` is a plugin for writing Scala `object`s to files. + +It is used as a core for Hepek static site generator: https://sake92.github.io/hepek/hepek/index.html . + +Project home: https://github.com/sake92/mill-hepek + + + +== Integration Testing Mill Plugins + +Integration testing for mill plugins. + +Project home: https://github.com/lefou/mill-integrationtest + + +We assume, you have a mill plugin named `mill-demo` + +[source,scala] +---- +// build.mill +import mill._, mill.scalalib._ +object demo extends ScalaModule with PublishModule { + // ... +} +---- + +Add a new test sub-project, e.g. `itest`. + +[source,scala] +---- +// build.mill +import $ivy.`de.tototec::de.tobiasroeser.mill.integrationtest_mill0.9:0.4.0` +import de.tobiasroeser.mill.integrationtest._ + +object demo extends ScalaModule with PublishModule { + // ... +} + +object itest extends MillIntegrationTestModule { + + def millTestVersion = "0.9.3" + + def pluginsUnderTest = Seq(demo) + +} +---- + +Your project should now look similar to this: + +[source,text] +---- +. ++-- demo/ +| +-- src/ +| ++-- it/ + +-- src/ + +-- 01-first-test/ + | +-- build.mill + | +-- src/ + | + +-- 02-second-test/ + +-- build.mill +---- + +As the buildfiles `build.mill` in your test cases typically want to access the locally built plugin(s), +the plugins publishes all plugins referenced under `pluginsUnderTest` to a temporary ivy repository, just before the test is executed. +The mill version used in the integration test then used that temporary ivy repository. 
+ +Instead of referring to your plugin with `import $ivy.'your::plugin:version'`, +you can use the following line instead, which ensures you will use the correct locally built plugins. + +[source,scala] +---- +// build.mill +import $exec.plugins +---- + +Effectively, at execution time, this line gets replaced by the content of `plugins.sc`, a file which was generated just before the test started to execute. + +Please always refer to the https://github.com/lefou/mill-integrationtest[official plugin documentation site] for complete and up-to-date information. + +== JaCoCo - Code Coverage + +Mill plugin to collect test coverage data with https://www.jacoco.org/jacoco/[JaCoCo] and generate reports. + +Plugin home: https://github.com/lefou/mill-jacoco + + + +== JBake + +Create static sites/blogs with JBake. + +Plugin home: https://github.com/lefou/mill-jbake + +JBake home: https://jbake.org + + +[source,scala] +---- +// build.mill +import mill._ +import $ivy.`de.tototec::de.tobiasroeser.mill.jbake:0.1.0` +import de.tobiasroeser.mill.jbake._ + +object site extends JBakeModule { + + def jbakeVersion = "2.6.4" + +} +---- + +Generate the site: + +[source,sh] +---- +bash> mill site.jbake +---- + +Start a local Web-Server on Port 8820 with the generated site: + +[source,sh] +---- +bash> mill site.jbakeServe +---- + +== JBuildInfo + +This is a Mill module similar to +xref:contrib/buildinfo.adoc[BuildInfo] +but for Java. +It will generate a Java class containing information from your build. 
+ +Project home: https://github.com/carueda/mill-jbuildinfo + +To declare a module that uses this plugin, extend the +`com.github.carueda.mill.JBuildInfo` trait and provide +the desired information via the `buildInfoMembers` method: + +[source,scala] +---- +// build.mill +import $ivy.`com.github.carueda::jbuildinfo:0.1.2` +import com.github.carueda.mill.JBuildInfo +import mill.T + +object project extends JBuildInfo { + def buildInfoMembers: T[Map[String, String]] = Task { + Map( + "name" -> "some name", + "version" -> "x.y.z" + ) + } +} +---- + +This will generate: + +[source,java] +---- +// BuildInfo.java +public class BuildInfo { + public static final String getName() { return "some name"; } + public static final String getVersion() { return "x.y.z"; } +} +---- + + +* `def buildInfoMembers: T[Map[String, String]]` + +The map containing all member names and values for the generated class. + +* `def buildInfoClassName: String`, default: `BuildInfo` + +The name of the class that will contain all the members from +`buildInfoMembers`. + +* `def buildInfoPackageName: Option[String]`, default: `None` + +The package name for the generated class. + + +== Kotlin + +https://kotlinlang.org/[Kotlin] compiler support for mill. + +Project home: https://github.com/lefou/mill-kotlin + +[source,scala] +---- +// Load the plugin from Maven Central via ivy/coursier +import $ivy.`de.tototec::de.tobiasroeser.mill.kotlin_mill0.9:0.2.0` + +import mill._ +import mill.scalalib._ +import mill.define._ + +import de.tobiasroeser.mill.kotlin._ + +object main extends KotlinModule { + + // Select the Kotlin version + def kotlinVersion = "1.4.21" + + // Set additional Kotlin compiler options, e.g. the language level and annotation processor + // Run `mill main.kotlincHelp` to get a list of supported options + def kotlincOptions = super.kotlincOptions() ++ Seq("-verbose") + +} +---- + +For documentation please visit the https://github.com/lefou/mill-kotlin[mill-kotlin project page]. 
+ +You will find there also a version compatibility matrix. + +== MDoc + +Simple MDoc runner for Mill + +This plugin provides a mill module that allows to execute https://scalameta.org/mdoc/[Scala MDoc] from within a mill build. +Scala MDoc simply compiles properly marked Scala snippets in plain md files and optionally runs them through an interpreter, augmenting the code with the interpreter output. + +Project home: https://github.com/atooni/mill-mdoc + +== `millw` / `millw.bat` - Mill Wrapper Scripts + +Small script to automatically fetch and execute mill build tool. + +Project home: https://github.com/lefou/millw + +`millw` is a small wrapper script around mill and works almost identical to +mill, but with additional features and compatibility with Windows. It +automatically downloads a mill release into `$HOME/.mill/download`. + +== MiMa + +Check binary compatibility with mill. + +Project home: https://github.com/lolgab/mill-mima + + +Just mix-in the `Mima` trait into your `ScalaModule`. +And set the previous artifacts you want to check binary compatibility. + +[source,scala] +---- +import mill._, scalalib._ + +import $ivy.`com.github.lolgab::mill-mima_mill0.9:0.0.2` +import com.github.lolgab.mill.mima._ + +object main extends ScalaModule with Mima { + + def mimaPreviousArtifacts = Agg( + ivy"my_group_id::main:my_previous_version" + ) + + // other settings ... + +} +---- + +You can then check the binary compatibility of the module with: + +[source,bash] +---- +> mill main.mimaReportBinaryIssues +Binary compatibility check passed. +---- + +== Missinglink + +https://github.com/spotify/missinglink[missinglink] check for Mill, ported from https://github.com/scalacenter/sbt-missinglink[sbt-missinglink]. 
+ +Project home: https://github.com/hoangmaihuy/mill-missinglink + +_build.mill_: +[source,scala] +---- +import $ivy.`io.github.hoangmaihuy::mill-missinglink::` +import io.github.hoangmaihuy.missinglink._ + +object example extends MissinglinkCheckModule +---- + +Runtime missinglink check command + +[source,shell script] +---- +> mill example.missinglinkCheck +---- + +== Native-Image + +Build GraalVM Native-Image binaries with mill. + +Project home: https://github.com/alexarchambault/mill-native-image + +Import the plugin and add the `NativeImage` trait to your module and set some +configuration options: + +[source,scala] +---- +import $ivy.`io.github.alexarchambault.mill::mill-native-image::0.1.25` +import io.github.alexarchambault.millnativeimage.NativeImage + +object hello extends ScalaModule with NativeImage { + def scalaVersion = "3.3.0" + def ivyDeps = ... // Your deps here + + def nativeImageName = "hello" + def nativeImageMainClass = "Main" + def nativeImageGraalVmJvmId = "graalvm-java17:22.3.2" + def nativeImageClassPath = runClasspath() + def nativeImageOptions = Seq( + "--no-fallback", + "--enable-url-protocols=http,https", + "-Djdk.http.auth.tunneling.disabledSchemes=", + ) ++ (if (sys.props.get("os.name").contains("Linux")) Seq("--static") else Seq.empty) +} +---- + +Then run the `nativeImage` task to build the native-image binary. + +[source,bash] +---- +> ./mill hello.nativeImage +... 
+------------------------------------------------------------------------------------------------------------------------ + 5.9s (4.9% of total time) in 32 GCs | Peak RSS: 5.71GB | CPU load: 5.84 +------------------------------------------------------------------------------------------------------------------------ +Produced artifacts: + /Users/myuser/repos/scala/mill-native-image/example/out/hello/nativeImage.dest/hello (executable +) + /Users/myuser/repos/scala/mill-native-image/example/out/hello/nativeImage.dest/hello.build_artifacts.txt (txt) +======================================================================================================================== +Finished generating 'hello' in 2m 0s. +---- + +For more configuration options, building binaries inside Docker, a sample project +and more, check the project readme. + +== OSGi + +Produce OSGi Bundles with mill. + +Project home: https://github.com/lefou/mill-osgi + + +[source,scala] +---- +import mill._, mill.scalalib._ +import $ivy.`de.tototec::de.tobiasroeser.mill.osgi:0.0.5` +import de.tobiasroeser.mill.osgi._ + +object project extends ScalaModule with OsgiBundleModule { + + def bundleSymbolicName = "com.example.project" + + def osgiHeaders = Task { super.osgiHeaders().copy( + `Export-Package` = Seq("com.example.api"), + `Bundle-Activator` = Some("com.example.internal.Activator") + )} + + // other settings ... + +} +---- + +== PowerShell Completion + +Basic PowerShell completion support. + +Project home: https://github.com/sake92/mill-powershell-completion + + +== PublishM2 + +_Since Mill `0.6.1-27-f265a4` there is a built-in `publishM2Local` task in `PublishModule`._ + +Mill plugin to publish artifacts into a local Maven repository. + +Project home: https://github.com/lefou/mill-publishM2 + + +Just mix-in the `PublishM2Module` into your project. +`PublishM2Module` already extends mill's built-in `PublishModule`. 
+ +File: `build.mill` + +[source,scala] +---- +import mill._, scalalib._, publish._ + +import $ivy.`de.tototec::de.tobiasroeser.mill.publishM2:0.0.1` +import de.tobiasroeser.mill.publishM2._ + +object project extends PublishModule with PublishM2Module { + // ... +} +---- + +Publishing to default local Maven repository + +[source,bash] +---- +> mill project.publishM2Local +[40/40] project.publishM2Local +Publishing to /home/user/.m2/repository +---- + +Publishing to custom local Maven repository + +[source,bash] +---- +> mill project.publishM2Local /tmp/m2repo +[40/40] project.publishM2Local +Publishing to /tmp/m2repo +---- + +== Rust JNI + +A plugin for building Rust JNI code! + +Project home: https://github.com/otavia-projects/mill-rust-jni + +For documentation please visit the https://github.com/otavia-projects/mill-rust-jni[mill-rust-jni project page]. + +== ScalablyTyped + +https://scalablytyped.org/[Scalablytyped] support for mill. + +Project home: https://github.com/lolgab/mill-scalablytyped + + +Mix-in the `ScalablyTyped` trait into your `ScalaJSModule` and +set-up a `package.json` file with your TypeScript dependencies. + +[source,scala] +---- +import mill._, scalalib._ + +import $ivy.`com.github.lolgab::mill-scalablytyped::0.0.2` +import com.github.lolgab.mill.scalablytyped._ + +object main extends ScalaJSModule with ScalablyTyped { + + // other settings ... + +} +---- + +It will run ScalablyTyped and add the converted dependencies to the module's `ivyDeps`. 
+ +== Scala TSI + +https://github.com/scala-tsi/scala-tsi[scala-tsi] support for Mill + +Project home: https://github.com/hoangmaihuy/mill-scala-tsi + +_build.mill_: + +[source,scala] +---- +import $ivy.`io.github.hoangmaihuy::mill-scala-tsi::` + +import io.github.hoangmaihuy.scalatsi._ + +object example extends ScalaModule with ScalaTsiModule { +// The classes that you want to generate typescript interfaces for +override def typescriptExports = Seq("MyClass") +// The output file which will contain the typescript interfaces +override def typescriptOutputFile = millSourcePath / "model.ts" +// Include the package(s) of the classes here +// Optionally import your own TSType implicits to override default default generated +override def typescriptGenerationImports = Seq("mymodel._", "MyTypescript._") +} +---- + +_MyClass.scala_: +[source,scala] +---- +case class MyClass(foo: String, bar: Int) +---- + +Generate Typescript command: + +[source,shell script] +---- +> mill example.generateTypescript +---- + +_model.ts_: +[source] +---- +export interface IMyClass { + foo: string + bar: number +} +---- + +== Scalafix + +https://scalacenter.github.io/scalafix/[Scalafix] support for mill. + +Project home: https://github.com/joan38/mill-scalafix + +_build.mill_: + +[source,scala] +---- +import $ivy.`com.goyeau::mill-scalafix:` +import com.goyeau.mill.scalafix.ScalafixModule +import mill.scalalib._ + +object project extends ScalaModule with ScalafixModule { + def scalaVersion = "2.12.11" +} +---- + +[source,shell script] +---- +> mill project.fix +[29/29] project.fix +/project/project/src/MyClass.scala:12:11: error: [DisableSyntax.var] mutable state should be avoided + private var hashLength = 7 + ^^^ +1 tasks failed +project.fix A Scalafix linter error was reported +---- + +== SCIP (SCIP Code Intelligence Protocol) + +Support for generating https://about.sourcegraph.com/blog/announcing-scip[SCIP] +indexes from your Mill build. 
This is most commonly used to power intelligent +code navigation on https://sourcegraph.com/[Sourcegraph]. + +Project home: https://github.com/ckipp01/mill-scip + + +The recommended way to use `mill-scip` is via the +https://sourcegraph.github.io/scip-java/[`scip-java`] cli tool that can be +installed via https://get-coursier.io/[Coursier]. + +[source, shell script] +---- +cs install scip-java +---- + +Once you have `scip-java` installed, running the following command at the root of your +Mill build will generate an index and place it at the root of your project. + +[source, shell script] +---- +scip-java index +---- + +You can also manually trigger this with Mill by doing the following: + +[source, shell script, subs="attributes,verbatim"] +---- + +mill --import ivy:io.chris-kipp::mill-scip::{mill-scip-version} io.kipp.mill.scip.Scip/generate +---- + +This will then generate your `index.scip` inside of +`out/io/kipp/mill/scip/Scip/generate.dest/`. + +== Shell Completions + +As Mill is a tool often used from the CLI (Command line interface), you may also be interested in installing some completion support for your preferred shell: + +* <<_bash_completion>> +* <<_fish_completion>> +* <<_zsh_completion>> + + +== Spring Boot + +Support packaging Spring Boot Applications with Mill. + +Project home: https://github.com/lefou/mill-spring-boot + +[source,scala,subs="attributes,verbatim"] +---- +import mill._ +import mill.scalalib._ +import de.tobiasroeser.mill.spring.boot.SpringBootModule + +object app extends MavenModule with SpringBootModule { + override def springBootToolsVersion = "2.7.13" +} +---- + +[source,shell] +---- +# Package as executable Spring-Boot Application +$ mill app.springBootAssembly +---- + + +== Squery + +Source code generator for https://github.com/sake92/squery/[Squery] SQL library boilerplate. 
+ +Project home: https://github.com/sake92/squery + +[source,scala,subs="attributes,verbatim"] +---- +import $ivy.`ba.sake::mill-squery-generator_mill0.11:0.6.2` +import mill._ +import mill.scalalib._ +import ba.sake.squery.generator._ +import ba.sake.squery.generator.mill.SqueryGeneratorModule + +object app extends ScalaModule with SqueryGeneratorModule { + // use T.input(T.ctx.env("MY_ENV_VAR")) to set sensitive variables like password etc + def squeryJdbcUrl = "jdbc:..." + def squeryUsername = ".." + def squeryPassword = ".." + def squerySchemas = Seq("myschema" -> "com.mypackage.myschema") + + // override to tweak codegen settings + def squeryGeneratorConfig: T[SqueryGeneratorConfig] = ... +---- + +[source,shell] +---- +# Generate source files +$ ./mill root.squeryGenerate +---- + + +== Universal Packager + +Support universal archive packaging for Java application with Mill, ported from sbt-native-packager. + +Project home: https://github.com/hoangmaihuy/mill-universal-packager + +[source,scala,subs="attributes,verbatim"] +---- +// build.mill +import $ivy.`io.github.hoangmaihuy::mill-universal-packager::` + +import io.github.hoangmaihuy.mill.packager.archetypes.JavaAppPackagingModule + +object example extends JavaAppPackagingModule { + override def packageVersion = "0.1.0" +} +---- + +[source,shell] +---- +# Package as zip archive with Bash start script +$ mill example.universalPackage +---- + +== VCS Version + +Mill plugin to derive a version from (last) git tag and edit state. It may support other VCS as well. + +Project home: https://github.com/lefou/mill-vcs-version + +Lots of formatting options are provided. +When used with its defaults, the outcome is identical to the version scheme that Mill itself uses. 
+ + +[source,scala] +---- +import mill._ +import mill.scalalib._ + +// Load the plugin from Maven Central via ivy/coursier +import $ivy.`de.tototec::de.tobiasroeser.mill.vcs.version::0.1.2` +import de.tobiasroeser.mill.vcs.version.VcsVersion + +object main extends JavaModule with PublishModule { + override def publishVersion: T[String] = VcsVersion.vcsState().format() +} +---- + +== Zsh Completion + +Limited zsh completion support. + +This plugin adds ZSH shell completions to Mill. + +Project home: https://github.com/carlosedp/mill-zsh-completions diff --git a/docs/modules/ROOT/pages/extending/writing-plugins.adoc b/docs/modules/ROOT/pages/extending/writing-plugins.adoc new file mode 100644 index 00000000000..d3f24383f6d --- /dev/null +++ b/docs/modules/ROOT/pages/extending/writing-plugins.adoc @@ -0,0 +1,6 @@ += Writing Mill Plugins +:page-aliases: Writing_Mill_Plugins.adoc + +include::partial$gtag-config.adoc[] + +include::partial$example/extending/plugins/7-writing-mill-plugins.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/fundamentals/bundled-libraries.adoc b/docs/modules/ROOT/pages/fundamentals/bundled-libraries.adoc new file mode 100644 index 00000000000..779c7348d8f --- /dev/null +++ b/docs/modules/ROOT/pages/fundamentals/bundled-libraries.adoc @@ -0,0 +1,44 @@ += Bundled Libraries +:page-aliases: External_References.adoc, Bundled_Libraries.adoc + +include::partial$gtag-config.adoc[] + +Mill comes bundled with a set of external Open Source libraries and projects. 
+ +== OS-lib + +Project page:: https://github.com/com-lihaoyi/os-lib +ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/os-lib_2.13/latest/index.html + +include::partial$example/fundamentals/libraries/1-oslib.adoc[] + +== uPickle + +Project page:: https://github.com/com-lihaoyi/upickle +ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/upickle_2.13/latest/index.html + +include::partial$example/fundamentals/libraries/2-upickle.adoc[] + +== Requests-Scala + +Project page:: https://github.com/com-lihaoyi/requests-scala +ScalaDoc:: https://javadoc.io/doc/com.lihaoyi/requests_2.13/latest/index.html + +include::partial$example/fundamentals/libraries/3-requests.adoc[] + +== MainArgs + + +Project page:: https://github.com/com-lihaoyi/mainargs +Scaladoc:: https://javadoc.io/doc/com.lihaoyi/mainargs_2.13/latest/index.html + +include::partial$example/fundamentals/libraries/4-mainargs.adoc[] + +== Coursier + +Coursier is the Scala application and artifact manager. Mill uses Coursier for +all third-party artifact resolution and management in JVM languages (Scala, +Java, etc.) + +Project page:: https://github.com/coursier/coursier +Documentation:: https://get-coursier.io/docs/overview \ No newline at end of file diff --git a/docs/modules/ROOT/pages/fundamentals/configuring-jvm-versions.adoc b/docs/modules/ROOT/pages/fundamentals/configuring-jvm-versions.adoc new file mode 100644 index 00000000000..8ebcd577640 --- /dev/null +++ b/docs/modules/ROOT/pages/fundamentals/configuring-jvm-versions.adoc @@ -0,0 +1,8 @@ += Configuring JVM Versions + +By default, Mill uses the same JVM that it itself is running on to compile/test/run +Java/Scala/Kotlin modules. This page goes into more detail about downloading +and using a custom Java home on a per-module basis. 
+ +include::partial$example/depth/javahome/1-custom-jvms.adoc[] + diff --git a/docs/modules/ROOT/pages/fundamentals/cross-builds.adoc b/docs/modules/ROOT/pages/fundamentals/cross-builds.adoc new file mode 100644 index 00000000000..f04145ede85 --- /dev/null +++ b/docs/modules/ROOT/pages/fundamentals/cross-builds.adoc @@ -0,0 +1,54 @@ += Cross Builds +:page-aliases: Cross_Builds.adoc + +include::partial$gtag-config.adoc[] + +Cross-building refers to taking the same sources and configuration and building +it multiple times with minor changes. This could mean taking the same Scala +codebase and building it across multiple Scala versions, taking the same +application and building twice for dev/release, or taking the same module +config and building it across a variety of source folders. + +== Simple Cross Modules + +include::partial$example/fundamentals/cross/1-simple.adoc[] + +== Cross Modules Defaults + +include::partial$example/fundamentals/cross/11-default-cross-module.adoc[] + +== Cross Modules Source Paths + +include::partial$example/fundamentals/cross/2-cross-source-path.adoc[] + +== Using Cross Modules from Outside Tasks + +include::partial$example/fundamentals/cross/3-outside-dependency.adoc[] + +== Using Cross Modules from other Cross Modules + +include::partial$example/fundamentals/cross/4-cross-dependencies.adoc[] + +== Multiple Cross Axes + +include::partial$example/fundamentals/cross/5-multiple-cross-axes.adoc[] + +== Extending Cross Modules + +include::partial$example/fundamentals/cross/6-axes-extension.adoc[] + +== Inner Cross Modules + +include::partial$example/fundamentals/cross/7-inner-cross-module.adoc[] + +== Cross Resolvers + +include::partial$example/fundamentals/cross/8-resolvers.adoc[] + +== Dynamic Cross Modules + +include::partial$example/fundamentals/cross/9-dynamic-cross-modules.adoc[] + +== Use Case: Static Blog + +include::partial$example/fundamentals/cross/10-static-blog.adoc[] diff --git 
a/docs/modules/ROOT/pages/fundamentals/library-deps.adoc b/docs/modules/ROOT/pages/fundamentals/library-deps.adoc new file mode 100644 index 00000000000..b5e4c295e98 --- /dev/null +++ b/docs/modules/ROOT/pages/fundamentals/library-deps.adoc @@ -0,0 +1,326 @@ += Library Dependencies in Mill +:link-coursier: https://github.com/coursier/coursier +:link-coursier-doc: https://get-coursier.io/docs/overview +:page-aliases: Library_Dependencies.adoc + +include::partial$gtag-config.adoc[] + +Beside the dependencies between Mill modules, most non-trivial source projects have dependencies to other libraries. + +Mill uses {link-coursier}[coursier] to resolve and download dependencies. +Once downloaded, they are located in the coursier specific cache locations. +For more details about coursier, refer to the {link-coursier-doc}[coursier documentation]. + +== Dependencies in General + +Mill dependencies have the simple form: + +---- +ivy"{organization}:{name}:{version}" +---- + +Additional attributes are also supported: + +---- +ivy"{organization}:{name}:{version}[;{attribute}={value}]*" +---- + +When working in other Java and Scala projects, you will find some synonyms, which typically all mean the same. + +For example in the Maven ecosystem, the `organization` is called the `group` and the `name` is called the `artifact`. +The whole triplet is often called `GAV`. + +In Mill we use the additional term `artifactId` which is identical to the `name` when used in the normal form shown above. +When a different form is used, e.g. some double-colons are used between the parts, the `artifactId` typically contains suffixes, but the name doesn't. + +.Example for a simple Java Dependency +[source,scala] +---- +def ivyDeps = Agg( + ivy"org.slf4j:slf4j-api:1.7.25" +) +---- + + +== Test dependencies (there is no `test` scope) + +One difference between Mill and other build tools like sbt or Maven is the fact, that tests are ordinary submodules on their own. 
+For convenience, most modules already come with a pre-configured trait for a test submodule, +which already inherits all dependencies of their parent module. +If you need additional test dependencies, you simply add them by overriding `def ivyDeps`, as you would do with normal library dependencies. + +When migrating a sbt project and seeing a dependency like this: `"ch.qos.logback" % "logback-classic" % "1.2.3" % "test"`, +simply add it to the test module's `ivyDeps` as an ordinary dependency. +There is no special test scope in Mill. + +.Example +[source,scala] +---- +object main extends JavaModule { + object test extends JavaModuleTests { + def ivyDeps = Agg( + ivy"ch.qos.logback:logback-classic:1.2.3" + ) + } +} +---- + +== Compile-only dependencies (`provided` scope) + +If you want to use a dependency only at compile time, you can declare it with the `compileIvyDeps` task. + +.Example +[source,scala] +---- +def compileIvyDeps = Agg( + ivy"org.slf4j:slf4j-api:1.7.25" +) +---- + +When Mill generates files to interact with package managers, like `pom.xml` for Maven repositories, such compile-only dependencies are mapped to the `provided` scope. + +Please note, that dependencies with `provided` scope will never be resolved transitively. Hence, the name "provided", as the task runtime needs to "provide" them, if they are needed. + + +== Runtime dependencies + +If you want to declare dependencies to be used at runtime (but not at compile time), you can use the `runIvyDeps` task. + +.Example +[source,scala] +---- +def runIvyDeps = Agg( + ivy"ch.qos.logback:logback-classic:1.2.0" +) +---- + +It is also possible to use a higher version of the same library dependencies already defined in `ivyDeps`, to ensure you compile against a minimal API version, but actually run with the latest available version. + +== Dependency management + +Dependency management consists in listing dependencies whose versions we want to force. 
Having +a dependency in dependency management doesn't mean that this dependency will be fetched, only +that + +* if it ends up being fetched transitively, its version will be forced to the one in dependency management + +* if its version is empty in an `ivyDeps` section in Mill, the version from dependency management will be used + +Dependency management also allows to add exclusions to dependencies, both explicit dependencies and +transitive ones. + +Dependency management can be passed to Mill in two ways: + +* via external Maven BOMs, like https://repo1.maven.org/maven2/com/google/cloud/libraries-bom/26.50.0/libraries-bom-26.50.0.pom[this one], +whose Maven coordinates are `com.google.cloud:libraries-bom:26.50.0` + +* via the `depManagement` task, that allows to directly list dependencies whose versions we want to enforce + +=== External BOMs + +include::partial$example/fundamentals/library-deps/bom-1-external-bom.adoc[] + +=== Dependency management task + +include::partial$example/fundamentals/library-deps/bom-2-dependency-management.adoc[] + +== Searching For Dependency Updates + +include::partial$example/fundamentals/dependencies/1-search-updates.adoc[] + +== Scala dependencies + +Scala major releases up until version `2.13` are binary incompatible. +That means, mixing dependencies of different binary platforms will result in non-working runtimes and obscure and hard to debug issues. + +To easily pick only a compatible version, a convention was established to append the scala major version as a suffix to the package name.footnote:[ +Scala 2 versions have the unusual version format: `{epoch}.{major}.{minor}`.] +E.g. to select the Scala 2.13 version of a library `foo`, the final `artifactId` will contain the additional suffix `_2.13`, such that the final `artifactId` is `foo_2.13`. + +To always pick the right version and support cross compiling, +you can omit the scala version and instead use a double colon (`::`) between the `organization` and the `name`, e.g. 
`ivy"com.typesafe.akka:akka-actor_2.12:2.5.25"`. +Your module needs to `extends ScalaModule` though. + +If you want to use dependencies that are cross-published against the full Scala version, e.g. `2.12.12`, +you can use three colons (`:::`) between `organization` and `name`, e.g.: `ivy"org.scalamacros:::paradise:2.1.1"`. + +.Example +[source,scala] +---- +def ivyDeps = Agg( + // explicit scala version suffix, NOT RECOMMENDED! + ivy"com.typesafe.akka:akka-actor_2.12:2.5.25", + ivy"com.typesafe.akka::akka-actor:2.5.25", + ivy"org.scalamacros:::paradise:2.1.1" +) +---- + +== Scala 3 interoperability + +Since the release of Scala 3, the binary compatibility story for Scala has changed. +That means, Scala 3 dependencies can be mixed with Scala 2.13 dependencies. +In fact, the Scala 3 standard library is the same as for Scala 2.13. + + +[CAUTION] +-- +As Scala 3 and Scala 2.13 have different binary platforms, but their artifacts are in general compatible, this introduces new challenges. + +There is currently no mechanism that prevents bringing the same dependency twice into the classpath (one for Scala 2.13 and one for Scala 3). +-- + + +=== Using Scala 2.13 from Scala 3 + +If your Scala version is a Scala 3.x, but you want to use the Scala 2.13 version of a specific library, you can use the `.withDottyCompat` method on that dependency. + +.Example: +[source,scala] +---- +def scalaVersion = "3.2.1" +def ivyDeps = Agg( + ivy"com.lihaoyi::upickle:2.0.0".withDottyCompat(scalaVersion()) //1 +) +---- +<1> This will result in a Scala 2.13 dependency `com.lihaoyi::upickle_2.13:2.0.0` + + +[NOTE] +-- +Do you wonder where the name "dotty" comes from? + +In the early development of Scala 3, the Scala 3 compiler was called "Dotty". Later, the name was changed to Scala 3, but the compiler project itself is still named "dotty". 
To render a tree of dependencies (transitive included) you can run `mill myModule.ivyDepsTree`. Here is what the start of `./mill __.ivyDepsTree` looks like in the `mill` project itself:
The `--whatDependsOn` argument can also be repeated to target multiple
artifacts at once. Just repeat the `--whatDependsOn <artifact>` pattern. Note
that the artifact pattern follows the `org:artifact` convention. You can't
include a version as the utility will show you all usages of the `artifact`.
Also note that when using `--whatDependsOn`, usage of `--inverse` is forced
in order to make the tree appear in an inverted manner to more easily show you
where the dependency is coming from.
.Example: Short notation to exclude `fansi_2.12` library from transitive dependency set of `pprint`.
Mill comes with built-in modules such as `mill.scalalib.ScalaModule` and
`mill.scalalib.CrossSbtModule`, but you can also define your own modules to do
things that are not built-in to Mill.
+ +include::partial$example/fundamentals/modules/6-modules.adoc[] + +== RootModule + +include::partial$example/fundamentals/modules/7-root-module.adoc[] + +== Use Case: DIY Java Modules + +include::partial$example/fundamentals/modules/8-diy-java-modules.adoc[] + +== Backticked Names + +include::partial$example/fundamentals/modules/9-backticked-names.adoc[] + + +== External Modules + +Libraries for use in Mill can define ``ExternalModule``s: ``Module``s which are +shared between all builds which use that library: + +[source,scala] +---- +package foo +import mill._ + +object Bar extends mill.define.ExternalModule { + def baz = Task { 1 } + def qux() = Task.Command { println(baz() + 1) } + + lazy val millDiscover = mill.define.Discover[this.type] +} +---- + +In the above example, `Bar` is an `ExternalModule` living within the `foo` +Java package, containing the `baz` task and `qux` command. Those can be run +from the command line via: + +[source,bash] +---- +mill foo.Bar/baz +mill foo.Bar/qux +---- + +``ExternalModule``s are useful for someone providing a library for use with Mill +that is shared by the entire build: for example, +`mill.scalalib.ZincWorkerApi/zincWorker` provides a shared Scala compilation +service & cache that is shared between all ``ScalaModule``s, and +`mill.scalalib.GenIdea/idea` lets you generate IntelliJ projects without +needing to define your own `Task.Command` in your `build.mill` file + +== Aliasing External Modules + +include::partial$example/fundamentals/modules/10-external-module-aliases.adoc[] + +== Abstract Modules References + +include::partial$example/fundamentals/modules/11-abstract-module-ref.adoc[] + +== Default Tasks + +include::partial$example/fundamentals/modules/12-default-tasks.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/fundamentals/out-dir.adoc b/docs/modules/ROOT/pages/fundamentals/out-dir.adoc new file mode 100644 index 00000000000..5bd7501ccdc --- /dev/null +++ 
b/docs/modules/ROOT/pages/fundamentals/out-dir.adoc @@ -0,0 +1,11 @@ += The Output Directory +:page-aliases: Out_Dir.adoc + +include::partial$gtag-config.adoc[] + + +include::partial$example/fundamentals/out-dir/1-out-files.adoc[] + +== Using another location than the `out/` directory + +include::partial$example/fundamentals/out-dir/2-custom-out.adoc[] diff --git a/docs/modules/ROOT/pages/fundamentals/tasks.adoc b/docs/modules/ROOT/pages/fundamentals/tasks.adoc new file mode 100644 index 00000000000..fcd22e38499 --- /dev/null +++ b/docs/modules/ROOT/pages/fundamentals/tasks.adoc @@ -0,0 +1,59 @@ += Tasks +:page-aliases: Tasks.adoc + +include::partial$gtag-config.adoc[] + +One of Mill's core abstractions is its _Task Graph_: this is how Mill defines, +orders and caches work it needs to do, and exists independently of any support +for building Java, Kotlin, or Scala. + +Mill task graphs are primarily built using methods and macros defined on +`mill.define.Task`, aliased as `T` for conciseness: + +- {mill-doc-url}/api/latest/mill/define/Task$.html[mill.define.Task] + +== Task Cheat Sheet + +The following table might help you make sense of the small collection of +different Task types: + +[cols="<,<,<,<,<,<,<"] +|=== +| |Target |Command |Source/Input |Anonymous Task |Persistent Task |Worker + +|Cached to Disk |X | | | |X | +|JSON Writable |X |X |X| |X | +|JSON Readable |X | | | |X | +|CLI Runnable |X |X | | |X | +|Takes Arguments | |X | |X | | +|Cached In-Memory | | | | | |X +|=== + +include::partial$example/fundamentals/tasks/1-task-graph.adoc[] + +[#primitive-tasks] +== Primary Tasks + +include::partial$example/fundamentals/tasks/2-primary-tasks.adoc[] + +== Other Tasks + +=== Anonymous Tasks + +include::partial$example/fundamentals/tasks/3-anonymous-tasks.adoc[] + +=== Inputs + +include::partial$example/fundamentals/tasks/4-inputs.adoc[] + +=== Persistent Tasks + +include::partial$example/fundamentals/tasks/5-persistent-tasks.adoc[] + +=== Workers + 
+include::partial$example/fundamentals/tasks/6-workers.adoc[] + +== (Experimental) Forking Concurrent Futures within Tasks + +include::partial$example/fundamentals/tasks/7-forking-futures.adoc[] diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc new file mode 100644 index 00000000000..e066be05930 --- /dev/null +++ b/docs/modules/ROOT/pages/index.adoc @@ -0,0 +1,124 @@ += Mill: A Fast JVM Build Tool + +include::partial$gtag-config.adoc[] + + +Mill is a fast, scalable, multi-language build tool that supports Java, Scala, +and Kotlin: + +* Mill can build the same Java codebase xref:comparisons/maven.adoc[4-10x faster than Maven], +or xref:comparisons/gradle.adoc[2-4x faster than Gradle] + +* Mill's typed config language and immutable xref:depth/design-principles.adoc[task graph] +helps keep builds clean and understandable + +* Mill is an easier alternative to https://bazel.build/[Bazel] +for xref:large/large.adoc[large multi-language monorepos] with hundreds of modules + +To get started using Mill, see: + +* xref:cli/installation-ide.adoc[] + +Or see the language-specific introductory documentation linked below: + +[cols="1a,1a,1a"] +|=== +| xref:javalib/intro.adoc[image:index/iconscout-java.svg[java,32] Java with Mill] +| xref:scalalib/intro.adoc[image:index/iconscout-scala.svg[scala,20] Scala with Mill] +| xref:kotlinlib/intro.adoc[image:index/iconscout-kotlin.svg[kotlin,32] Kotlin with Mill] +|=== + +If you want to use Mill on an existing codebase, see xref:migrating/migrating.adoc[] + +=== Why Mill? + +Although the Java compiler is very fast and the Java language is easy to learn, +JVM build tools have a reputation for being sluggish and confusing. 
Mill tries to +offer a better alternative, letting your build system take full advantage of the +Java platform's performance and usability: + +* *Performance*: Mill automatically +xref:depth/execution-model.adoc#_caching_in_mill[caches] +and xref:cli/flags.adoc#_jobs_j[parallelizes] build tasks to keep local development fast, +and avoids the long configuration times seen in other tools like Gradle or SBT. +xref:large/selective-execution.adoc[Selective execution] keeps +CI validation times short by only running the tests necessary to validate a code change. + +* *Maintainability*: Mill's config and xref:javalib/intro.adoc#_custom_build_logic[custom logic] +is written in xref:depth/why-scala.adoc[concise type-checked Scala code], +with an immutable xref:depth/design-principles.adoc[module tree and task graph]. This +catches config issues early, and helps both humans and IDEs +(xref:cli/installation-ide.adoc#_intellij[IntelliJ] or +xref:cli/installation-ide.adoc#_vscode[VSCode]) +understand your Mill build better than any other build system + +* *Flexibility*: Mill's tasks and modules allow anything from adding +xref:fundamentals/tasks.adoc#primitive-tasks[simple build steps], up to +entire xref:extending/example-python-support.adoc[language toolchains]. +You can xref:extending/import-ivy-plugins.adoc[import any JVM library] in your build, +use Mill's rich ecosystem of xref:extending/thirdparty-plugins.adoc[Third-Party Mill Plugins], +or xref:extending/writing-plugins.adoc[write plugins yourself] and +publish them to Maven Central for others to use. 
+ + +For a quick introduction of why you may care about Mill, see the following page: + +* xref:comparisons/why-mill.adoc[] +* xref:comparisons/unique.adoc[] + +Or if you prefer a video introduction: + +* https://www.youtube.com/watch?v=Dry6wMRN6MI[Video: Better Java Builds with the Mill Build Tool], + Japan Java User Group Fall Conference Oct 2024 +* https://www.youtube.com/watch?v=xbGG7MDWR-M[Video: Better Scala Builds with the Mill Build Tool], + Scala.IO Paris Conference Nov 2024 + +Mill is used to build many real-world projects, such as the +https://github.com/swaldman/c3p0[C3P0 JDBC Connection Pool], +https://github.com/coursier/coursier[Coursier dependency resolver], +https://github.com/com-lihaoyi/Ammonite[Ammonite REPL], and the +https://github.com/SpinalHDL/SpinalHDL[SpinalHDL] and +https://github.com/chipsalliance/chisel[Chisel] hardware design frameworks. +Mill can be used for applications built on top of common JVM frameworks like +Spring Boot (both xref:javalib/web-examples.adoc#_spring_boot_todomvc_app[in Java] +and xref:kotlinlib/web-examples.adoc#_spring_boot_todomvc_app[in Kotlin]), +xref:javalib/web-examples.adoc#_micronaut_todomvc_app[Micronaut], +or xref:kotlinlib/web-examples.adoc#_ktor_todomvc_app[Ktor]. + +Mill borrows ideas from other tools like https://maven.apache.org/[Maven], +https://gradle.org/[Gradle], https://bazel.build/[Bazel], but tries to learn from the +strengths of each tool and improve on their weaknesses. 
For comparisons with existing +build tools, check out these pages: + + +[cols="1a,1a,1a"] +|=== +| xref:comparisons/maven.adoc[image:index/maven.png[maven,24] Mill vs Maven] +| xref:comparisons/gradle.adoc[image:index/gradle.svg[gradle,32] Mill vs Gradle] +| xref:comparisons/sbt.adoc[image:index/sbt.png[sbt,32] Mill vs SBT] +|=== + +=== Contributing and Community + +If you want to contribute to Mill, or are interested in the fundamental ideas behind +Mill rather than the user-facing benefits discussed above, check out the page on +xref:depth/design-principles.adoc[Mill Design Principles]. In particular, this video +is a must-watch to help you understand the fundamental concepts behind Mill and why +they are interesting: + +* https://www.youtube.com/watch?v=UsXgCeU-ovI[Video: A Deep Dive into the Mill Build Tool] + +To engage Mill with the community, you can use the channels below: + +* https://github.com/com-lihaoyi/mill/discussions[Mill Github Discussions] +* https://discord.com/invite/scala[Scala Discord], in the `TOOLING#mill` channel + +Mill maintains an open list of issue bounties below, with payouts ranging from +500USD to 3000USD per issue. Feel free to look through the list and try your +hand at any bounties that may interest you + +* https://github.com/orgs/com-lihaoyi/discussions/6[Open Issue Bounties] + +Commercial support and services related to Mill are available if you need help with +training, adoption, or maintenance in your organization. Email info@mill-build.org +with details on what you need and we can make an arrangement. 
diff --git a/docs/modules/ROOT/pages/javalib/build-examples.adoc b/docs/modules/ROOT/pages/javalib/build-examples.adoc new file mode 100644 index 00000000000..991c0cec76b --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/build-examples.adoc @@ -0,0 +1,24 @@ += Java Build Examples +:page-aliases: Java_Build_Examples.adoc + +include::partial$gtag-config.adoc[] + +== Example Builds for Real Projects + +Mill comes bundled with example builds for real-world open-source projects, +demonstrating how Mill can be used to build code outside of tiny example codebases: + +=== JimFS + +include::partial$example/thirdparty/jimfs.adoc[] + +=== Apache Commons IO + +include::partial$example/thirdparty/commons-io.adoc[] + +== Real World Mill Builds + +=== C3P0 + +https://github.com/swaldman/c3p0[C3P0] is a JDBC connection pooling library +written in Java, built using the Mill build tool. diff --git a/docs/modules/ROOT/pages/javalib/dependencies.adoc b/docs/modules/ROOT/pages/javalib/dependencies.adoc new file mode 100644 index 00000000000..ac2c01aef22 --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/dependencies.adoc @@ -0,0 +1,33 @@ += Java Library Dependencies + +include::partial$gtag-config.adoc[] + +This page goes into more detail about configuring third party dependencies +for `JavaModule`. + +== Adding Ivy Dependencies + +include::partial$example/javalib/dependencies/1-ivy-deps.adoc[] + +== Runtime and Compile-time Dependencies + +include::partial$example/javalib/dependencies/2-run-compile-deps.adoc[] + +== Dependency Management + +Mill has support for dependency management, see the +xref:fundamentals/library-deps.adoc#_dependency_management[Dependency Management section] +in xref:fundamentals/library-deps.adoc[]. 
+ +== Unmanaged Jars + +include::partial$example/javalib/dependencies/3-unmanaged-jars.adoc[] + +== Downloading Unmanaged Jars + +include::partial$example/javalib/dependencies/4-downloading-unmanaged-jars.adoc[] + +== Repository Config + +include::partial$example/javalib/dependencies/5-repository-config.adoc[] + diff --git a/docs/modules/ROOT/pages/javalib/intro.adoc b/docs/modules/ROOT/pages/javalib/intro.adoc new file mode 100644 index 00000000000..f8d5f124390 --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/intro.adoc @@ -0,0 +1,32 @@ + += Building Java with Mill +:page-aliases: Intro_to_Mill.adoc, Intro_to_Mill_for_Java.adoc, Java_Intro_to_Mill.adoc + +include::partial$gtag-config.adoc[] + + +:language: Java +:language-small: java + +include::partial$Intro_Header.adoc[] + +== Simple Java Module + +include::partial$example/javalib/basic/1-simple.adoc[] + +== Custom Build Logic + +include::partial$example/javalib/basic/2-custom-build-logic.adoc[] + +== Multi-Module Project + +include::partial$example/javalib/basic/3-multi-module.adoc[] + +== Maven-Compatible Modules + +include::partial$example/javalib/basic/4-compat-modules.adoc[] + + +== Realistic Java Example Project + +include::partial$example/javalib/basic/6-realistic.adoc[] diff --git a/docs/modules/ROOT/pages/javalib/linting.adoc b/docs/modules/ROOT/pages/javalib/linting.adoc new file mode 100644 index 00000000000..316072591c1 --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/linting.adoc @@ -0,0 +1,36 @@ += Linting Java Projects +:page-aliases: Linting_Java_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around enforcing the code quality of Java +codebases using the Mill build tool + + +== Linting with ErrorProne + +include::partial$example/javalib/linting/1-error-prone.adoc[] + +== Linting with Checkstyle + +include::partial$example/javalib/linting/2-checkstyle.adoc[] + +== AutoFormatting with Palantir Java Format + 
+include::partial$example/javalib/linting/3-palantirformat.adoc[] + +== Code Coverage with Jacoco + +Mill supports Java code coverage analysis via the mill-jacoco plugin. See the +plugin repository documentation for more details: + +* https://github.com/lefou/mill-jacoco + +== Binary Compatibility Enforcement + +If you want to lint against binary compatibility breakages, e.g. when developing +an upstream library that downstream libraries may compile against, you can use +the Lightbend Migration Manager (MiMa) tool via the mill-mima plugin. See +the mill-mima documentation for more details: + +* https://github.com/lolgab/mill-mima \ No newline at end of file diff --git a/docs/modules/ROOT/pages/javalib/module-config.adoc b/docs/modules/ROOT/pages/javalib/module-config.adoc new file mode 100644 index 00000000000..dff0272c4a9 --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/module-config.adoc @@ -0,0 +1,54 @@ += Java Module Configuration +:page-aliases: Java_Module_Config.adoc + +include::partial$gtag-config.adoc[] + +:language: Java +:language-small: java + +This page goes into more detail about the various configuration options +for `JavaModule`. 
* {mill-doc-url}/api/latest/mill/scalalib/JavaModule.html[mill.scalalib.JavaModule]
+ +include::partial$example/javalib/publishing/3-revapi.adoc[] + +CAUTION: The `revapi` task does not fail if incompatibilities are reported. You should fix these, and verify by re-running `revapi`, before a release. + +[TIP] +==== +The `revapi` task returns the path to a directory that can be used to resolve the relative path to any extension configuration output. +[source,json] +---- +[ + { + "extension": "revapi.reporter.text", + "configuration": { + "minSeverity": "BREAKING", + "output": "report.txt" + } + } +] +---- +==== + +include::partial$Publishing_Footer.adoc[] + +== Java App and Bundles using `jlink` + +include::partial$example/javalib/publishing/5-jlink.adoc[] + +== Java Installers using `jpackage` + +include::partial$example/javalib/publishing/6-jpackage.adoc[] + + +== Building Native Image with Graal VM + +include::partial$example/javalib/publishing/7-native-image.adoc[] diff --git a/docs/modules/ROOT/pages/javalib/testing.adoc b/docs/modules/ROOT/pages/javalib/testing.adoc new file mode 100644 index 00000000000..0890139620a --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/testing.adoc @@ -0,0 +1,30 @@ += Testing Java Projects +:page-aliases: Testing_Java_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around working with test suites using the Mill build tool + +== Defining Unit Test Suites + +include::partial$example/javalib/testing/1-test-suite.adoc[] + + +== Test Dependencies + +include::partial$example/javalib/testing/2-test-deps.adoc[] + +== Defining Integration Test Suites + +include::partial$example/javalib/testing/3-integration-suite.adoc[] + +== Test Grouping + +include::partial$example/javalib/testing/4-test-grouping.adoc[] + + +== Github Actions Test Reports + +If you use Github Actions for CI, you can use https://github.com/mikepenz/action-junit-report in +your pipeline to render the generated `test-report.xml` files nicely on Github. 
See +https://github.com/com-lihaoyi/mill/pull/4218/files for an example integration \ No newline at end of file diff --git a/docs/modules/ROOT/pages/javalib/web-examples.adoc b/docs/modules/ROOT/pages/javalib/web-examples.adoc new file mode 100644 index 00000000000..7d33d58f35d --- /dev/null +++ b/docs/modules/ROOT/pages/javalib/web-examples.adoc @@ -0,0 +1,28 @@ += Java Web Project Examples +:page-aliases: Java_Web_Examples.adoc + +include::partial$gtag-config.adoc[] + +This page contains examples of using Mill as a build tool for web-applications. +It covers setting up a basic backend server with a variety of server frameworks + +== Jetty Hello World App + +include::partial$example/javalib/web/1-hello-jetty.adoc[] + + +== Spring Boot Hello World App + +include::partial$example/javalib/web/2-hello-spring-boot.adoc[] + +== Spring Boot TodoMvc App + +include::partial$example/javalib/web/3-todo-spring-boot.adoc[] + +== Micronaut Hello World App + +include::partial$example/javalib/web/4-hello-micronaut.adoc[] + +== Micronaut TodoMvc App + +include::partial$example/javalib/web/5-todo-micronaut.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/javascriptlib/dependencies.adoc b/docs/modules/ROOT/pages/javascriptlib/dependencies.adoc new file mode 100644 index 00000000000..0e88c9c4792 --- /dev/null +++ b/docs/modules/ROOT/pages/javascriptlib/dependencies.adoc @@ -0,0 +1,22 @@ += Typescript Library Dependencies + +include::partial$gtag-config.adoc[] + +This page goes into more detail about configuring third party dependencies +for `TypescriptModule`s. 
+ +== Adding Dependencies + +include::partial$example/javascriptlib/dependencies/1-npm-deps.adoc[] + +== Unmanaged Packages + +include::partial$example/javascriptlib/dependencies/2-unmanaged-packages.adoc[] + +== Downloading Unmanaged Packages + +include::partial$example/javascriptlib/dependencies/3-downloading-unmanaged-packages.adoc[] + +== Using Custom or Private Registries + +include::partial$example/javascriptlib/dependencies/4-repository-config.adoc[] diff --git a/docs/modules/ROOT/pages/javascriptlib/intro.adoc b/docs/modules/ROOT/pages/javascriptlib/intro.adoc new file mode 100644 index 00000000000..081b8374de8 --- /dev/null +++ b/docs/modules/ROOT/pages/javascriptlib/intro.adoc @@ -0,0 +1,31 @@ += Building Javascript with Mill + +include::partial$gtag-config.adoc[] + +:language: Typescript +:language-small: typescript + +== Simple Typescript Module + +include::partial$example/javascriptlib/basic/1-simple.adoc[] + +== React Module + +include::partial$example/javascriptlib/basic/2-react.adoc[] + +== Custom Build Logic + +include::partial$example/javascriptlib/basic/3-custom-build-logic.adoc[] + +== Multi Module Project + +include::partial$example/javascriptlib/basic/4-multi-modules.adoc[] + +== Simple Client-Server + +include::partial$example/javascriptlib/basic/5-client-server-hello.adoc[] + +== Realistic Client-Server Example Project + +include::partial$example/javascriptlib/basic/6-client-server-realistic.adoc[] + diff --git a/docs/modules/ROOT/pages/javascriptlib/module-config.adoc b/docs/modules/ROOT/pages/javascriptlib/module-config.adoc new file mode 100644 index 00000000000..4e5af0731a6 --- /dev/null +++ b/docs/modules/ROOT/pages/javascriptlib/module-config.adoc @@ -0,0 +1,30 @@ += Typescript Module Configuration + +include::partial$gtag-config.adoc[] + +:language: Typescript +:language-small: typescript + +== Common Configuration Overrides + +include::partial$example/javascriptlib/module/1-common-config.adoc[] + +== Custom Tasks + 
+include::partial$example/javascriptlib/module/2-custom-tasks.adoc[] + +== Overriding Tasks + +include::partial$example/javascriptlib/module/3-override-tasks.adoc[] + +== Compilation & Execution Flags + +include::partial$example/javascriptlib/module/4-compilation-execution-flags.adoc[] + +== Filesystem Resources + +include::partial$example/javascriptlib/module/5-resources.adoc[] + +== Bundling Configuration + +include::partial$example/javascriptlib/module/6-executable-config.adoc[] diff --git a/docs/modules/ROOT/pages/javascriptlib/testing.adoc b/docs/modules/ROOT/pages/javascriptlib/testing.adoc new file mode 100644 index 00000000000..ce86231289b --- /dev/null +++ b/docs/modules/ROOT/pages/javascriptlib/testing.adoc @@ -0,0 +1,21 @@ += Testing Typescript Projects + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around working with test suites using the Mill build tool + +== Defining Unit Test Suites + +include::partial$example/javascriptlib/testing/1-test-suite.adoc[] + +== Test Dependencies + +include::partial$example/javascriptlib/testing/2-test-deps.adoc[] + +== Integration Suite with Cypress + +include::partial$example/javascriptlib/testing/3-integration-suite-cypress.adoc[] + +== Integration Suite with PlayWright + +include::partial$example/javascriptlib/testing/3-integration-suite-playwright.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/kotlinlib/build-examples.adoc b/docs/modules/ROOT/pages/kotlinlib/build-examples.adoc new file mode 100644 index 00000000000..7b20349bcea --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/build-examples.adoc @@ -0,0 +1,5 @@ += (TODO) Kotlin Build Examples +:page-aliases: Kotlin_Build_Examples.adoc + +include::partial$gtag-config.adoc[] + diff --git a/docs/modules/ROOT/pages/kotlinlib/dependencies.adoc b/docs/modules/ROOT/pages/kotlinlib/dependencies.adoc new file mode 100644 index 00000000000..9670e311702 --- /dev/null +++ 
b/docs/modules/ROOT/pages/kotlinlib/dependencies.adoc @@ -0,0 +1,38 @@ += Kotlin Library Dependencies + +include::partial$gtag-config.adoc[] + +This page goes into more detail about the various configuration options +for `KotlinModule`. + +Many of the APIs covered here are listed in the API documentation: + +* {mill-doc-url}/api/latest/mill/kotlinlib/KotlinModule.html[mill.kotlinlib.KotlinModule] + + + +== Adding Ivy Dependencies + +include::partial$example/kotlinlib/dependencies/1-ivy-deps.adoc[] + +== Runtime and Compile-time Dependencies + +include::partial$example/kotlinlib/dependencies/2-run-compile-deps.adoc[] + +== Dependency Management + +Mill has support for dependency management, see the +xref:fundamentals/library-deps.adoc#_dependency_management[Dependency Management section] +in xref:fundamentals/library-deps.adoc[]. + +== Unmanaged Jars + +include::partial$example/kotlinlib/dependencies/3-unmanaged-jars.adoc[] + +== Downloading Unmanaged Jars + +include::partial$example/kotlinlib/dependencies/4-downloading-unmanaged-jars.adoc[] + +== Repository Config + +include::partial$example/kotlinlib/dependencies/5-repository-config.adoc[] diff --git a/docs/modules/ROOT/pages/kotlinlib/intro.adoc b/docs/modules/ROOT/pages/kotlinlib/intro.adoc new file mode 100644 index 00000000000..6de8bc33ee7 --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/intro.adoc @@ -0,0 +1,44 @@ + += Building Kotlin with Mill +:page-aliases: Kotlin_Intro_to_Mill.adoc + +include::partial$gtag-config.adoc[] + +:language: Kotlin +:language-small: kotlin + +include::partial$Intro_Header.adoc[] + +NOTE: Mill Kotlin support is currently focused on the JVM, with a lot of APIs +still under active development. It is expected to continue evolving over time +as Android and Multiplatform support is fleshed out. Try it out, but please be aware of its +limitations! 
+ +== Simple Kotlin Module + +include::partial$example/kotlinlib/basic/1-simple.adoc[] + +== Custom Build Logic + +include::partial$example/kotlinlib/basic/2-custom-build-logic.adoc[] + +== Multi-Module Project + +include::partial$example/kotlinlib/basic/3-multi-module.adoc[] + +== Maven-Compatible Modules + +include::partial$example/kotlinlib/basic/4-compat-modules.adoc[] + + +== Realistic Kotlin Example Project + +include::partial$example/kotlinlib/basic/6-realistic.adoc[] + + +== History + +Mill's Kotlin support originated as the third-party plugin +https://github.com/lefou/mill-kotlin[lefou/mill-kotlin], which was later included with +the main Mill codebase under its +https://github.com/lefou/mill-kotlin/blob/main/LICENSE[Apache 2.0 License]. diff --git a/docs/modules/ROOT/pages/kotlinlib/linting.adoc b/docs/modules/ROOT/pages/kotlinlib/linting.adoc new file mode 100644 index 00000000000..3eaa8331535 --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/linting.adoc @@ -0,0 +1,25 @@ += Linting Kotlin Projects +:page-aliases: Linting_Kotlin_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around enforcing the code quality of Kotlin +codebases using the Mill build tool + + +== Linting with Detekt + +include::partial$example/kotlinlib/linting/1-detekt.adoc[] + +== Linting with KtLint + +include::partial$example/kotlinlib/linting/2-ktlint.adoc[] + +== Autoformatting with KtFmt + +include::partial$example/kotlinlib/linting/3-ktfmt.adoc[] + +== Code Coverage with Kover + +include::partial$example/kotlinlib/linting/4-kover.adoc[] + diff --git a/docs/modules/ROOT/pages/kotlinlib/module-config.adoc b/docs/modules/ROOT/pages/kotlinlib/module-config.adoc new file mode 100644 index 00000000000..51747e47cfa --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/module-config.adoc @@ -0,0 +1,56 @@ += Kotlin Module Configuration +:page-aliases: Kotlin_Module_Config.adoc + +include::partial$gtag-config.adoc[] + +:language: Kotlin 
+:language-small: kotlin + +This page goes into more detail about the various configuration options +for `KotlinModule`. + +Many of the APIs covered here are listed in the API documentation: + +* {mill-doc-url}/api/latest/mill/kotlinlib/KotlinModule.html[mill.kotlinlib.KotlinModule] + + + +== Common Configuration Overrides + +include::partial$example/kotlinlib/module/1-common-config.adoc[] + + +== Compilation & Execution Flags + +include::partial$example/kotlinlib/module/4-compilation-execution-flags.adoc[] + +== Classpath and Filesystem Resources + +include::partial$example/kotlinlib/module/7-resources.adoc[] + +== Kotlin Compiler Plugins + +include::partial$example/kotlinlib/module/8-kotlin-compiler-plugins.adoc[] + +== Doc-Jar Generation + +include::partial$example/kotlinlib/module/9-docjar.adoc[] + +== Specifying the Main Class + +include::partial$example/kotlinlib/module/11-main-class.adoc[] + + + +== Custom Tasks + +include::partial$example/kotlinlib/module/2-custom-tasks.adoc[] + +== Overriding Tasks + +include::partial$example/kotlinlib/module/3-override-tasks.adoc[] + + +== Native C Code with JNI + +include::partial$example/kotlinlib/module/15-jni.adoc[] diff --git a/docs/modules/ROOT/pages/kotlinlib/publishing.adoc b/docs/modules/ROOT/pages/kotlinlib/publishing.adoc new file mode 100644 index 00000000000..4547af5c037 --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/publishing.adoc @@ -0,0 +1,32 @@ += Kotlin Packaging & Publishing +:page-aliases: Publishing_Kotlin_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around packaging and publishing your Kotlin projects for others to use + + +== Customizing the Assembly + +include::partial$example/kotlinlib/publishing/1-assembly-config.adoc[] + + +== Publishing Locally + +include::partial$example/kotlinlib/publishing/2-publish-module.adoc[] + +include::partial$Publishing_Footer.adoc[] + +== `jlink` and `jpackage` + + +Mill has builtin support for the 
https://docs.oracle.com/en/java/javase/11/tools/jlink.html[JLink] and +https://docs.oracle.com/en/java/javase/17/docs/specs/man/jpackage.html[JPackage] command line tools. +For more details, see: + +* xref:javalib/publishing.adoc#_java_app_and_bundles_using_jlink[Java App and Bundles using JLink] +* xref:javalib/publishing.adoc#_java_installers_using_jpackage[Java Installers using JPackage] + +== Building Native Image with Graal VM + +include::partial$example/kotlinlib/publishing/7-native-image.adoc[] diff --git a/docs/modules/ROOT/pages/kotlinlib/testing.adoc b/docs/modules/ROOT/pages/kotlinlib/testing.adoc new file mode 100644 index 00000000000..ae8c7b25218 --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/testing.adoc @@ -0,0 +1,30 @@ += Testing Kotlin Projects +:page-aliases: Testing_Kotlin_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around working with test suites using the Mill build tool + +== Defining Unit Test Suites + +include::partial$example/kotlinlib/testing/1-test-suite.adoc[] + + +== Test Dependencies + +include::partial$example/kotlinlib/testing/2-test-deps.adoc[] + +== Defining Integration Test Suites + +include::partial$example/kotlinlib/testing/3-integration-suite.adoc[] + +== Test Grouping + +include::partial$example/kotlinlib/testing/4-test-grouping.adoc[] + + +== Github Actions Test Reports + +If you use Github Actions for CI, you can use https://github.com/mikepenz/action-junit-report in +your pipeline to render the generated `test-report.xml` files nicely on Github. 
See +https://github.com/com-lihaoyi/mill/pull/4218/files for an example integration \ No newline at end of file diff --git a/docs/modules/ROOT/pages/kotlinlib/web-examples.adoc b/docs/modules/ROOT/pages/kotlinlib/web-examples.adoc new file mode 100644 index 00000000000..b9a9586e851 --- /dev/null +++ b/docs/modules/ROOT/pages/kotlinlib/web-examples.adoc @@ -0,0 +1,34 @@ += Kotlin Web Project Examples + +include::partial$gtag-config.adoc[] + +This page contains examples of using Mill as a build tool for web-applications. +It covers setting up a basic backend server with a variety of server frameworks + +== Ktor Hello World App + +include::partial$example/kotlinlib/web/1-hello-ktor.adoc[] + +== Ktor TodoMvc App + +include::partial$example/kotlinlib/web/2-todo-ktor.adoc[] + +== Simple KotlinJS Module + +include::partial$example/kotlinlib/web/3-hello-kotlinjs.adoc[] + +== Ktor Webapp, KotlinJS Client + +include::partial$example/kotlinlib/web/4-webapp-kotlinjs.adoc[] + +== Ktor KotlinJS Code Sharing + +include::partial$example/kotlinlib/web/5-webapp-kotlinjs-shared.adoc[] + +== Spring Boot Hello World App + +include::partial$example/kotlinlib/web/6-hello-spring-boot.adoc[] + +== Spring Boot TodoMvc App + +include::partial$example/kotlinlib/web/7-todo-spring-boot.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/large/large.adoc b/docs/modules/ROOT/pages/large/large.adoc new file mode 100644 index 00000000000..9c0c428adab --- /dev/null +++ b/docs/modules/ROOT/pages/large/large.adoc @@ -0,0 +1,19 @@ += Large Builds and Monorepos + +This section walks through Mill features and techniques used for managing large builds. +While Mill works great for small single-module projects, it is also able to work +effectively with large projects with hundreds of modules. Mill's own build for the +https://github.com/com-lihaoyi/mill[com-lihaoyi/mill] project has ~400 modules, and +other proprietary projects may have many more. + +Mill modules are cheap. 
Having more modules does not significantly impact performance +or resource usage, build files are incrementally re-compiled when modified, and modules are +lazily loaded and initialized only when needed. So you are encouraged to break up your project +into modules to manage the layering of your codebase or benefit from parallelism. + +Apart from Mill's basic scalability and performance, Mill also comes with many features +that can be utilized to help you manage the build system of a large project or codebase: + +* xref:large/selective-execution.adoc[] + +* xref:large/multi-file-builds.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/large/multi-file-builds.adoc b/docs/modules/ROOT/pages/large/multi-file-builds.adoc new file mode 100644 index 00000000000..b2f363c51d3 --- /dev/null +++ b/docs/modules/ROOT/pages/large/multi-file-builds.adoc @@ -0,0 +1,16 @@ += Multi-File Builds +:page-aliases: Structuring_Large_Builds.adoc + +include::partial$gtag-config.adoc[] + +include::partial$example/large/multi/10-multi-file-builds.adoc[] + +== Helper Files + +include::partial$example/large/multi/11-helper-files.adoc[] + +== Legacy `.sc` extension + +include::partial$example/large/multi/12-helper-files-sc.adoc[] + + diff --git a/docs/modules/ROOT/pages/large/selective-execution.adoc b/docs/modules/ROOT/pages/large/selective-execution.adoc new file mode 100644 index 00000000000..f6da91ea78d --- /dev/null +++ b/docs/modules/ROOT/pages/large/selective-execution.adoc @@ -0,0 +1,78 @@ += Selective Test Execution + +include::partial$gtag-config.adoc[] + + +include::partial$example/large/selective/9-selective-execution.adoc[] + + +== Reproducibility and Determinism + +Selective execution relies on the inputs to your project being deterministic +and reproducible, except for the code changes between the two versions, so that +Mill can compare the state of the build inputs before and after and only run +tasks downstream of those that changed. 
This is usually the case, but there are +some subtleties to be aware of: + +- *Dynamic `Task.Input` to capture Git metadata must be disabled*, e.g. using + https://github.com/lefou/mill-vcs-version[mill-vcs-version]. The easiest way to do + this is to guard such dynamic inputs on an environment variable, such that + in most scenarios it returns a constant `"SNAPSHOT"` string, and only when + necessary do you pass in the environment variable to compute a real version (e.g. + during publishing) + +```scala +def myProjectVersion: T[String] = Task.Input { + if (Task.env.contains("MY_PROJECT_STABLE_VERSION")) VcsVersion.calcVcsState(Task.log).format() + else "SNAPSHOT" +} +``` + +- *The filesystem layout and position of the before/after codebases must be exactly + the same*. This is not an issue when running `selective.prepare`/`selective.run` on + the same folder on one machine, but if the two calls are run on separate machines + you need to make sure the directory path is the same. + +- *You must use the same Operating System and Filesystem*, as differences there will + cause the filesystem signatures to change and thus spuriously trigger downstream tasks. + e.g. you cannot run `selective.prepare` on a Windows machine and `selective.run` on Linux + +- *Filesystem permissions must be preserved before/after*. e.g. running `selective.{prepare,run}` + on different Github Actions machines sharing artifacts can cause issues as + `upload-artifact`/`download-artifact` https://github.com/actions/download-artifact#permission-loss[does not preserve filesystem permissions]. + If this is an issue, you can run `chmod -R 777 .` before each of `selective.{prepare,run}` + to ensure they have the exact same filesystem permissions. 
+ + +== Debugging Selective Execution + +* Use `selective.resolve` before `selective.run`: this will print out what it was going to run, + and can give you a chance to eyeball if the list of targets to run makes sense or not + +* Look at xref:fundamentals/out-dir.adoc#_mill_invalidation_tree_json[out/mill-invalidation-tree.json], + whether on disk locally or printing it out (e.g. via `cat`) on your CI machines to diagnose issues + there. This would give you a richer view of which source tasks or inputs actually + triggered the invalidation, and what tasks were just invalidated due to being downstream of them. + + +== Limitations + +* *Selective execution can only work at the Mill Task granularity*. e.g. When working with + Java/Scala/Kotlin modules and test modules, the granularity of selection is at entire modules. + That means that if your modules are individually large, selective execution may not be able + to significantly narrow down the set of tests that need to run + +* *Selective execution usually cannot narrow down the set of integration tests to run*. Integration + tests by their nature depend on the entire application or system, and run test cases that + exercise different parts of it. But selective execution works at the task level and can only + see that every integration test depends on the entire codebase, and so any change in the + entire codebase could potentially affect any integration test, so selective execution will + select all of them. + +* *Selective execution is coarser-grained than runtime task caching*. e.g. If you add a newline + to a `foo/src/Foo.java` file and run `foo.testCached`, selective testing only knows that + `foo.sources` changed and `foo.testCached` is downstream of it, but it cannot know that + when you run `foo.compile` on the changed sources, the compilation output is unchanged, and + so `.testCached` can be skipped. 
This is inherent in the nature of selective execution, which + does its analysis without evaluation-time information and thus will always be more conservative + than the task skipping and cache-reuse that is done during evaluation. diff --git a/docs/modules/ROOT/pages/migrating/maven.adoc b/docs/modules/ROOT/pages/migrating/maven.adoc new file mode 100644 index 00000000000..41830940c2d --- /dev/null +++ b/docs/modules/ROOT/pages/migrating/maven.adoc @@ -0,0 +1,155 @@ += Migrating From Maven to Mill +:page-aliases: Migrating_A_Maven_Build_to_Mill.adoc +:icons: font + +include::partial$gtag-config.adoc[] + +The Mill `init` command can be used to convert a Maven build to Mill. This has +xref:#limitations[limitations] and is not intended to reliably migrate 100% of +Maven builds out there in the wild, but is instead meant to provide the basic +scaffolding of a Mill build for you to further refine and update manually. + +Each Maven module with a `pom.xml` is converted to a Mill `build.mill`/`package.mill` +file containing a top-level `MavenModule`. A nested `test` module is defined if both: + +* `src/test` exists +* a supported xref:javalib/testing.adoc[test framework] is detected (for a _tests only_ +module with test sources in `src/main/java`) + + +Again, note that `mill init` importing Maven builds is best effort. +This means that while small projects can be expected to complete without issue: + +include::partial$example/javalib/migrating/1-maven-complete.adoc[] + +More larger projects often require some manual tweaking in order to work: + +include::partial$example/javalib/migrating/2-maven-incomplete.adoc[] + +Nevertheless, even for larger builds `mill init` automates most of the tedious +busy-work of writing `build.mill`/`package.mill` files, and makes it much quicker +to get a working Mill build for any existing Maven project. 
+ + +== Capabilities + +The conversion + +* handles deeply nested modules +* captures project metadata +* configures dependencies for scopes: +** compile +** provided +** runtime +** test +* configures testing frameworks: +** JUnit 4 +** JUnit 5 +** TestNG +* configures multiple, compile and test, resource directories + +=== Command line arguments +.name of generated base module trait defining project metadata settings +[source,sh] +---- +./mill init --base-module MyModule +---- +.name of generated nested test module (defaults to `test`) +[source,sh] +---- +./mill init --test-module test +---- +.name of generated companion object defining constants for dependencies +[source,sh] +---- +./mill init --deps-object Deps +---- +.capture properties defined in `pom.xml` for publishing +[source,sh] +---- +./mill init --publish-properties +---- +.merge build files generated for a multi-module build +[source,sh] +---- +./mill init --merge +---- + +.use cache for Maven repository system +[source,sh] +---- +./mill init --cache-repository +---- +.process Maven plugin executions and configurations +[source,sh] +---- +./mill init --process-plugins +---- + +=== Verified projects + +The conversion has been tested with the following projects: + +* https://github.com/fusesource/jansi/archive/refs/tags/jansi-2.4.1.zip[jansi] +[source,sh] +---- +./mill init --base-module JansiModule --deps-object Deps --cache-repository --process-plugins +---- + +* https://github.com/davidmoten/geo/archive/refs/tags/0.8.1.zip[geo] (multi-module build) +[source,sh] +---- +./mill init --base-module GeoModule --deps-object Deps --merge --cache-repository --process-plugins +---- + +Post `init`, the following tasks were executed successfully: + +* `compile` +* `test` +* `publishLocal` + +[#limitations] +== Limitations + +The conversion does not support + +* build extensions +* build profiles +* non-Java (native) sources + +Maven plugin support is limited to + +* 
https://maven.apache.org/plugins/maven-compiler-plugin/[maven-compiler-plugin] + +[TIP] +==== +These limitations can be overcome by: + +* configuring equivalent Mill xref:extending/contrib-plugins.adoc[contrib] + or xref:extending/thirdparty-plugins.adoc[third party] plugins +* defining custom xref:extending/writing-plugins.adoc[plugins] +* defining custom xref:fundamentals/tasks.adoc[tasks] +* defining custom xref:fundamentals/cross-builds.adoc[cross modules] +==== + +== FAQ + +.How to fix compilation errors in generated build files? + +This could happen if a module and task name collision occurs. Either rename the module or enclose the name in backticks. + + +.How to fix JPMS `module not found` compilation errors? + +Set https://github.com/tfesenko/Java-Modules-JPMS-CheatSheet#how-to-export-or-open-a-package[additional command line options] +for dependencies. + + +.How to fix test compilation errors? + +* The test framework configured may be for an unsupported version; try upgrading the + corresponding dependencies. +* Mill does not add `provided` dependencies to the transitive dependencies of the nested + test module; specify the dependencies again, in one of `ivyDeps`, `compileIvyDeps`, `runIvyDeps`, in the test module. + + diff --git a/docs/modules/ROOT/pages/migrating/migrating.adoc b/docs/modules/ROOT/pages/migrating/migrating.adoc new file mode 100644 index 00000000000..aca914c3ee3 --- /dev/null +++ b/docs/modules/ROOT/pages/migrating/migrating.adoc @@ -0,0 +1,305 @@ += Migrating to Mill + +include::partial$gtag-config.adoc[] + +This page documents a playbook for migrating existing projects +from Maven, Gradle, or SBT to Mill. Some build tools also have additional semi-automated +tooling (e.g. see xref:migrating/maven.adoc[]), other automation is work-in-progress +(e.g. 
migrating from https://github.com/com-lihaoyi/mill/issues/3962[Gradle] or +https://github.com/com-lihaoyi/mill/issues/3450[SBT]) but while automation helps with +some of the scaffolding the general principles laid out on this page still apply. + +== How Long Does Migration Take? + +Migrating an existing project to Mill may take anywhere from an hour for small projects +to several days or more for larger projects. These steps come from experience doing proof-of-concept +migrations of a range of real-world projects to Mill, from simple single-module codebases to +complex multi-module builds with dozens of modules: + +|=== +| Migrated Project | Lines | Modules | Estimated Implementation Time +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/jimfs/build.mill[JimFS] | ~26kLOC | 1 module | ~2 hours +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/commons-io/build.mill[Apache Commons-IO] | ~100kLOC | 1 module | ~2 hours +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/gatling/build.mill[Gatling] | ~70kLOC | 21 modules | ~1 day +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/arrow/build.mill[Arrow] | ~60kLOC | 22 modules | ~5 days +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/mockito/build.mill[Mockito] | ~100kLOC | 22 modules | ~5 days +| https://github.com/com-lihaoyi/mill/tree/main/example/thirdparty/netty/build.mill[Netty] | ~500kLOC | 47 modules | ~5 days +|=== + + + +The numbers above are for proof-of-concept migrations, for someone with prior +build system expertise; you may need twice the implementation time for a complete +production-ready migration, with additional time for change-management work. 
+ +Build system migrations are not cheap, but the productivity benefits of a +faster and easier to use build system are significant (see xref:comparisons/why-mill.adoc[]), +especially multiplied over an entire team of engineers who may be working on +the codebase for multiple years. Generally the more actively a project is developed, and the +longer you expect development to continue, the more worthwhile it is to migrate from +Maven/Gradle/SBT +to Mill. + +== How to Approach Migration + +The basic approach taken for any such migration is as follows: + +1. *The existing source code and build system for the project is to + be left in-place and fully working.* + +** This ensures you have the flexibility to back out of the migration at any point in time +** On completion, this allows you to + perform head-to-head comparisons between old and new build systems + +2. *A parallel Mill build is set up for the project.* + +** Sub-project `pom.xml` and `build.gradle` files need to be translated into Mill ``Module``s +** Third-party dependencies need to be translated into Mill's `def ivyDeps` +** Third-party Plugins need to be replaced by their Mill equivalent, or re-implemented +** Custom build logic may need to be re-implemented in Mill + +3. *Once completed, the Mill build can be used as the default for some period of time* + +** This period gives you the time to be confident in the robustness of the new Mill build system, + during which both old and new build systems should be maintained and kept up to date. + +4. *After you are comfortable with the new Mill build, the old build + system can be removed.* + +Of the four steps above, most of the work goes into (2) setting up the parallel Mill +build for your project. 
We will walk through each of the sub-bullets in that step +below + +== Translating Subprojects to Modules + +* Download a `mill` bootstrap file as discussed in xref:cli/installation-ide.adoc[] and + create a `build.mill` file as described in xref:javalib/intro.adoc[] + +* Define a Mill `Module` for each subproject in the existing build, + and a `test` module for each. + +[cols="1,3"] +|=== +| Build Tool | Dependency +| Java +a| +```scala +object foo extends MavenModule{ + object test extends MavenTests{ + } +} +``` + +| Kotlin +a| +```scala +object foo extends KotlinMavenModule{ + object test extends KotlinMavenTests{ + } +} +``` + +| Scala +a| +```scala +object foo extends SbtModule{ + object test extends SbtTests{ + } +} +``` +|=== + + +* These modules should have names corresponding to the existing subprojects +path on disk, e.g. a subproject inside `foo/` should be `object foo extends MavenModule`, +or a subproject `bar/qux/` should be a nested: + +```scala +object bar extends MavenModule { + object qux extends MavenModule +} +``` + +* Wire up the existing inter-subproject dependencies using `def moduleDeps = Seq(...)` inside `object foo`. + +** Test dependencies can also be specified, using `def moduleDeps = super.moduleDeps ++ Seq(...)` +inside the `object test`. Note that test modules need to use `super.moduleDeps` to preserve +the dependency on the enclosing application module + +```scala +object foo extends MavenModule{ + object test extends MavenTests{ + } +} + +object bar extends MavenModule{ + def moduleDeps = Seq(foo) // application code dependency + object test extends MavenTests{ + def moduleDeps = super.moduleDeps ++ Seq(foo) // test code dependency + } +} +``` + + + +At this point, you have the rough skeleton of the project laid out. 
You can run +`./mill visualize __.compile` to show an SVG graph of how the project is laid out, and +`./mill show __.sources` to show where the source folders for each module are to eyeball +them and verify they are pointing at the right place. For a fully-self-contained project +with no external dependencies you could even compile it at this point, but most projects +will require some degree of third party dependencies that will need to be configured: + + +== Translating Third-Party Dependencies + +* Define the third-party dependencies for each module with `def ivyDeps`. + +These are a relatively straightforward translation: + +[cols="1,3"] +|=== +| Build Tool | Dependency +| Maven +a| +```xml + + com.google.guava + guava + 3.3.1-jre + +``` + +| Gradle +a| +```scala +implementation "com.google.guava:guava:3.3.1-jre" +``` + +| SBT +a| +```scala +libraryDependencies += "com.google.guava" % "guava" % "3.3.1-jre" +``` + +| Mill +a| +```scala +def ivyDeps = Agg(ivy"com.google.guava:guava:3.3.1-jre") +``` +|=== + +If you are building a Scala project using SBT: + +[cols="1,3"] +|=== +| Build Tool | Dependency +| SBT +a| +```scala +libraryDependencies += "com.lihaoyi" %% "scalatags" % "0.12.0" +``` + +| Mill +a| +```scala +def ivyDeps = Agg(ivy"com.lihaoyi::scalatags:0.12.0") +``` +|=== + +* Again, test-only third-party dependencies are defined inside the `object test` submodule. + +* Compile-only dependencies can be defined with `def compileIvyDeps`, and runtime-only/provided + dependencies defined with `def runIvyDeps` + +The documentation for xref:javalib/dependencies.adoc[] and xref:fundamentals/library-deps.adoc[] +has more details: how to configure unmanaged jars, repositories, pinning versions, etc. 
+ + +== Translating Third-Party Plugins + +At a high level, you want to take plugins that you use in Maven/Gradle/SBT and replace +them either with builtin Mill functionality: + +* xref:javalib/linting.adoc[] +* xref:javalib/testing.adoc[] +* xref:javalib/publishing.adoc[] +* Mill xref:extending/contrib-plugins.adoc[] or Mill xref:extending/thirdparty-plugins.adoc[] + +Third-party plugins differ between build systems, so the configuration and behavior may +differ in minor ways, but the high-level functionality should mostly be there. + +== Translating Custom Build Logic + +Generally, custom build logic from your own custom plugins or extensions will need to +be re-implemented. This is usually not terribly difficult, as either the logic is simple +(just moving some files around and zipping/unzipping them), or the logic is complex but +comes from an external tool (e.g. third-party compilers, code-generators, linters, etc.) + +1. For the simple cases, you can usually accomplish what you want using Mill's + xref:javalib/intro.adoc#_custom_build_logic[custom build logic]. Mill provides bundled + libraries for working with filesystem/subprocesses (xref:fundamentals/bundled-libraries.adoc#_os_lib[OS-Lib]), + JSON/binary serialization (xref:fundamentals/bundled-libraries.adoc#_upickle[uPickle]), + HTTP requests (xref:fundamentals/bundled-libraries.adoc#_requests_scala[Requests-Scala]). + +2. For using third-party libraries in your build, these are usually published + to Maven Central or some other package repository, in which case they are + easy to directly import and use in your custom tasks (see xref:extending/import-ivy-plugins.adoc[]) + +3. For more sophisticated integrations, e.g. 
if you need to dynamically compile + and run JVM programs or build plugins as part of your build, you can do so + (see xref:extending/running-jvm-code.adoc[]) + +== Long Tail Issues + +Typically, after you are done with the rough skeleton of your new Mill build with +most things compiling, you will find that some code does not yet compile and other +code compiles but does not pass tests. There will always be a long tail of small +configuration tweaks that need to be ported from your existing build system to your +new Mill build: + +* You may need to update code to use the `MILL_TEST_RESOURCE_DIR` environment variable + rather than the `"resources/"` folder directly in code, since Mill runs tests in + xref:depth/sandboxing.adoc[Sandboxes] that guard against unwanted filesystem access. +* Similarly, you may need to use `Task.workspace` or `mill.api.WorkspaceRoot.workspaceRoot` + to access the project root folder in custom build tasks, since the Mill build process + also runs in a sandbox by default +* Some tests may require xref:fundamentals/configuring-jvm-versions.adoc[] to run +* Some modules may require specific xref:javalib/module-config.adoc#_compilation_execution_flags[Compilation & Execution Flags] +* Some code may make use of xref:javalib/module-config.adoc#_annotation_processors[Annotation Processors] +* You may have xref:javalib/module-config.adoc#_native_c_code_with_jni[native code you need to compile and interop with using JNI] +* You may need to use frameworks like xref:javalib/web-examples.adoc#_spring_boot_hello_world_app[Spring Boot] + or xref:javalib/web-examples.adoc#_micronaut_hello_world_app[Micronaut] + +In general none of these issues are blocking, but they do require you to investigate +the various failures and figure out which part of your existing Mill build is missing. + +== Cleanup + +Lastly, at this point you have a Mill build that works, but you may not have a Mill build +that is easily maintainable. 
Mill provides a lot of tools to improve the maintainability +and understandability of your build system, and while you may not want to apply them up +front during the migration, once you have everything working you can go back and revisit +to see which ones may help: + +* xref:fundamentals/modules.adoc#_trait_modules[Trait Modules] to centralize common config + +* xref:large/multi-file-builds.adoc[Multi-File Builds] to let you co-locate + build logic and the code being built + +* xref:extending/writing-plugins.adoc[Writing and Publishing your own Mill Plugins] + if you want to share your build logic across multiple projects/repos in your organization + +== Conclusion + +As mentioned at the start of this page, migrating to a new build tool is not cheap +or easy, and can easily take a significant time commitment. Automation does help, +whether bundled in Mill or your own DIY scripts, but there will always be a long +tail of manual debugging and investigation necessary to reproduce every quirk and +idiosyncrasy of your old build system in your new Mill build. + +However, while _tedious_, such migrations are usually not _difficult_. Most +build systems use a relatively small set of third-party tools with small amounts of +custom logic, and Mill has built-in integrations with many common JVM tools and +makes custom logic easy to implement. In the end the decision to migrate comes down to +the benefits of Mill (see xref:comparisons/why-mill.adoc[]) outweighing the cost of migration, +which becomes more true as the lifespan and pace of development on a project grows. 
\ No newline at end of file diff --git a/docs/modules/ROOT/pages/pythonlib/dependencies.adoc b/docs/modules/ROOT/pages/pythonlib/dependencies.adoc new file mode 100644 index 00000000000..79e946ce869 --- /dev/null +++ b/docs/modules/ROOT/pages/pythonlib/dependencies.adoc @@ -0,0 +1,31 @@ += Python Library Dependencies + +include::partial$gtag-config.adoc[] + + +This page goes into more detail about configuring third party dependencies +for `PythonModule`s. + +== Adding Dependencies + +include::partial$example/pythonlib/dependencies/1-pip-deps.adoc[] + +=== Adding Dependencies via requirements.txt files + +include::partial$example/pythonlib/dependencies/2-pip-requirements.adoc[] + +== Unmanaged Wheels + +include::partial$example/pythonlib/dependencies/3-unmanaged-wheels.adoc[] + +== Downloading Unmanaged Wheels + +include::partial$example/pythonlib/dependencies/4-downloading-unmanaged-wheels.adoc[] + +== Using Custom Package Indexes + +include::partial$example/pythonlib/dependencies/5-repository-config.adoc[] + +== Debugging + +include::partial$example/pythonlib/dependencies/6-debugging.adoc[] diff --git a/docs/modules/ROOT/pages/pythonlib/intro.adoc b/docs/modules/ROOT/pages/pythonlib/intro.adoc new file mode 100644 index 00000000000..c6a4a46e8ee --- /dev/null +++ b/docs/modules/ROOT/pages/pythonlib/intro.adoc @@ -0,0 +1,22 @@ + += Building Python with Mill + +include::partial$gtag-config.adoc[] + + +:language: python +:language-small: python + +include::partial$Intro_Header.adoc[] + +== Simple Python Module + +include::partial$example/pythonlib/basic/1-simple.adoc[] + +== Custom Build Logic + +include::partial$example/pythonlib/basic/2-custom-build-logic.adoc[] + +== Multi-Module Project + +include::partial$example/pythonlib/basic/3-multi-module.adoc[] diff --git a/docs/modules/ROOT/pages/pythonlib/module-config.adoc b/docs/modules/ROOT/pages/pythonlib/module-config.adoc new file mode 100644 index 00000000000..460318c60f4 --- /dev/null +++ 
b/docs/modules/ROOT/pages/pythonlib/module-config.adoc @@ -0,0 +1,36 @@ += Python Module Configuration +:page-aliases: Python_Module_Config.adoc + +include::partial$gtag-config.adoc[] + +:language: Python +:language-small: python + +This page goes into more detail about the various configuration options +for `PythonModule`. + +// TODO: How to include PythonModule Link ? +// Many of the APIs covered here are listed in the Scaladoc: + +// * {mill-doc-url}/api/latest/mill/pythonlib/PythonModule.html[mill.pythonlib.PythonModule] + + +== Common Configuration Overrides + +include::partial$example/pythonlib/module/1-common-config.adoc[] + +== Custom Tasks + +include::partial$example/pythonlib/module/2-custom-tasks.adoc[] + +== Overriding Tasks + +include::partial$example/pythonlib/module/3-override-tasks.adoc[] + +== Compilation & Execution Flags + +include::partial$example/pythonlib/module/4-compilation-execution-flags.adoc[] + +== PythonPath and Filesystem Resources + +include::partial$example/pythonlib/module/5-resources.adoc[] diff --git a/docs/modules/ROOT/pages/pythonlib/publishing.adoc b/docs/modules/ROOT/pages/pythonlib/publishing.adoc new file mode 100644 index 00000000000..5671b0dddf7 --- /dev/null +++ b/docs/modules/ROOT/pages/pythonlib/publishing.adoc @@ -0,0 +1,12 @@ += Python Packaging & Publishing +:page-aliases: Publishing_Python_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around publishing your Python projects for others to use. 
+ +include::partial$example/pythonlib/publishing/1-publish-module.adoc[] + +== Advanced Packaging + +include::partial$example/pythonlib/publishing/2-publish-module-advanced.adoc[] diff --git a/docs/modules/ROOT/pages/pythonlib/testing.adoc b/docs/modules/ROOT/pages/pythonlib/testing.adoc new file mode 100644 index 00000000000..64118b3f103 --- /dev/null +++ b/docs/modules/ROOT/pages/pythonlib/testing.adoc @@ -0,0 +1,18 @@ += Testing Python Projects +:page-aliases: Testing_Python_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss topics around defining and running Python tests using the Mill build tool + +== Defining Unit Test Suites + +include::partial$example/pythonlib/testing/1-test-suite.adoc[] + +== Test Dependencies + +include::partial$example/pythonlib/testing/2-test-deps.adoc[] + +== Defining Integration Test Suites + +include::partial$example/pythonlib/testing/3-integration-suite.adoc[] diff --git a/docs/modules/ROOT/pages/pythonlib/web-examples.adoc b/docs/modules/ROOT/pages/pythonlib/web-examples.adoc new file mode 100644 index 00000000000..5118e2a3220 --- /dev/null +++ b/docs/modules/ROOT/pages/pythonlib/web-examples.adoc @@ -0,0 +1,25 @@ += Python Web Project Examples +:page-aliases: Python_Web_Examples.adoc + +include::partial$gtag-config.adoc[] + +This page provides examples of using Mill as a build tool for Python web applications. +It includes setting up a basic "Hello, World!" application and developing a fully +functional https://todomvc.com/[TodoMVC] app with Flask and Django, showcasing best practices +for project organization, scalability, and maintainability. 
+ +== Flask Hello World App + +include::partial$example/pythonlib/web/1-hello-flask.adoc[] + +== Flask TodoMVC App + +include::partial$example/pythonlib/web/2-todo-flask.adoc[] + +== Django Hello World App + +include::partial$example/pythonlib/web/3-hello-django.adoc[] + +== Django TodoMVC App + +include::partial$example/pythonlib/web/4-todo-django.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/reference/changelog.adoc b/docs/modules/ROOT/pages/reference/changelog.adoc new file mode 100644 index 00000000000..23d23d39850 --- /dev/null +++ b/docs/modules/ROOT/pages/reference/changelog.adoc @@ -0,0 +1,10 @@ +// Commented out because it gets picked up from the include below +// == Changelog +:page-aliases: Changelog.adoc + + +include::partial$project-readme.adoc[tag=changelog,leveloffset=-1] + +include::partial$project-readme.adoc[tag=changelogOld,leveloffset=-1] + +include::partial$gtag-config.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/pages/scalalib/build-examples.adoc b/docs/modules/ROOT/pages/scalalib/build-examples.adoc new file mode 100644 index 00000000000..da0e19283b5 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/build-examples.adoc @@ -0,0 +1,36 @@ += Scala Build Examples +:page-aliases: Scala_Build_Examples.adoc + +include::partial$gtag-config.adoc[] + + + +== Example Builds for Real Projects + +Mill comes bundled with example builds for real-world open-source projects, +demonstrating how Mill can be used to build code outside of tiny example codebases: + +=== Acyclic + +include::partial$example/thirdparty/acyclic.adoc[] + +=== Fansi + +include::partial$example/thirdparty/fansi.adoc[] + +== Real World Mill Builds + +=== Ammonite + +https://github.com/com-lihaoyi/Ammonite[Ammonite] is an ergonomic Scala REPL. + +=== Scala-CLI + +https://github.com/VirtusLab/scala-cli[Scala-CLI] is the primary CLI tool that +runs when you enter `scala` in the terminal. 
It is able to compile, test, run, +and package your Scala code in a variety of different ways + +=== Coursier + +https://github.com/coursier/coursier[Coursier] is a fast JVM dependency resolver, +used in many build tools to resolve and download third party dependencies \ No newline at end of file diff --git a/docs/modules/ROOT/pages/scalalib/dependencies.adoc b/docs/modules/ROOT/pages/scalalib/dependencies.adoc new file mode 100644 index 00000000000..902d8b7be15 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/dependencies.adoc @@ -0,0 +1,37 @@ += Scala Library Dependencies + +include::partial$gtag-config.adoc[] + + +This page goes into more detail about configuring third party dependencies +for `ScalaModule`. + + + +== Adding Ivy Dependencies + +include::partial$example/scalalib/dependencies/1-ivy-deps.adoc[] + +== Runtime and Compile-time Dependencies + +include::partial$example/scalalib/dependencies/2-run-compile-deps.adoc[] + +== Dependency Management + +Mill has support for dependency management, see the +xref:fundamentals/library-deps.adoc#_dependency_management[Dependency Management section] +in xref:fundamentals/library-deps.adoc[]. 
+ +== Unmanaged Jars + +include::partial$example/scalalib/dependencies/3-unmanaged-jars.adoc[] + + +== Downloading Unmanaged Jars + +include::partial$example/scalalib/dependencies/4-downloading-unmanaged-jars.adoc[] + + +== Repository Config + +include::partial$example/scalalib/dependencies/5-repository-config.adoc[] diff --git a/docs/modules/ROOT/pages/scalalib/intro.adoc b/docs/modules/ROOT/pages/scalalib/intro.adoc new file mode 100644 index 00000000000..21db19866b6 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/intro.adoc @@ -0,0 +1,34 @@ + += Building Scala with Mill +:page-aliases: Intro_to_Mill_for_Scala.adoc, Scala_Intro_to_Mill.adoc + +include::partial$gtag-config.adoc[] + +:language: Scala +:language-small: scala + +include::partial$Intro_Header.adoc[] + +== Simple Scala Module + +include::partial$example/scalalib/basic/1-simple.adoc[] + +== Custom Build Logic + +include::partial$example/scalalib/basic/2-custom-build-logic.adoc[] + +== Multi-Module Project + +include::partial$example/scalalib/basic/3-multi-module.adoc[] + + +== SBT-Compatible Modules + +include::partial$example/scalalib/basic/4-compat-modules.adoc[] + + +== Realistic Scala Example Project + +include::partial$example/scalalib/basic/6-realistic.adoc[] + + diff --git a/docs/modules/ROOT/pages/scalalib/linting.adoc b/docs/modules/ROOT/pages/scalalib/linting.adoc new file mode 100644 index 00000000000..58c06ede50b --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/linting.adoc @@ -0,0 +1,45 @@ += Linting Scala Projects +:page-aliases: Linting_Scala_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around maintaining the code quality of Scala +codebases using the Mill build tool + + +== Linting and Autofixing with Scalafix + +Scalafix is a tool that analyzes your Scala source code, performing intelligent analyses and +code quality checks, and is often able to automatically fix the issues that it discovers. 
+It can also perform automated refactoring. + +Mill supports Scalafix through the Mill-Scalafix third party module. See the module documentation +for more details: + +* https://github.com/joan38/mill-scalafix + +== Linting with Acyclic Files Enforcement + +include::partial$example/scalalib/linting/3-acyclic.adoc[] + +== Autoformatting with ScalaFmt + +include::partial$example/scalalib/linting/1-scalafmt.adoc[] + +== Code Coverage with Scoverage + +include::partial$example/scalalib/linting/2-contrib-scoverage.adoc[] + +Mill supports Scala code coverage analysis via the Scoverage contrib plugin. See the +contrib plugin documentation for more details: + +* xref:contrib/scoverage.adoc[] + +== Binary Compatibility Enforcement + +If you want to lint against binary compatibility breakages, e.g. when developing +an upstream library that downstream libraries may compile against, you can use +the Lightbend Migration Manager (MiMa) tool via the mill-mima plugin. See +the mill-mima documentation for more details: + +* https://github.com/lolgab/mill-mima \ No newline at end of file diff --git a/docs/modules/ROOT/pages/scalalib/module-config.adoc b/docs/modules/ROOT/pages/scalalib/module-config.adoc new file mode 100644 index 00000000000..a881dc814d1 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/module-config.adoc @@ -0,0 +1,124 @@ += Scala Module Configuration +:page-aliases: Configuring_Mill.adoc, Scala_Module_Config.adoc + +include::partial$gtag-config.adoc[] + +:language: Scala +:language-small: scala + +This page goes into more detail about the various configuration options +for `ScalaModule`. 
+ +Many of the APIs covered here are listed in the Scaladoc: + +* {mill-doc-url}/api/latest/mill/scalalib/ScalaModule.html[mill.scalalib.ScalaModule] + + +== Common Configuration Overrides + +include::partial$example/scalalib/module/1-common-config.adoc[] + +== Compilation & Execution Flags + +include::partial$example/scalalib/module/4-compilation-execution-flags.adoc[] + +== Classpath and Filesystem Resources + +include::partial$example/scalalib/module/7-resources.adoc[] + +== Scala Compiler Plugins + +include::partial$example/scalalib/module/8-scala-compiler-plugins.adoc[] + +== Scaladoc Config + +include::partial$example/scalalib/module/9-docjar.adoc[] + +== Specifying the Main Class + +include::partial$example/scalalib/module/11-main-class.adoc[] + + +== Cross-Scala-Version Modules + +include::partial$example/scalalib/module/14-cross-scala-version.adoc[] + +== Unidoc + +include::partial$example/scalalib/module/15-unidoc.adoc[] + +== Custom Tasks + +include::partial$example/scalalib/module/2-custom-tasks.adoc[] + +== Overriding Tasks + +include::partial$example/scalalib/module/3-override-tasks.adoc[] + + + + + +== Using the Ammonite Repl / Scala console + +All ``ScalaModule``s have a `console` and a `repl` task, to start a Scala console or an Ammonite Repl. + +When using the `console`, you can configure its `scalac` options using the `consoleScalacOptions` task. + +For example, you may want to inherit all of your regular `scalacOptions` but disable `-Xfatal-warnings`: + +.Example: Using `consoleScalacOptions` to disable fatal warnings +[source,scala,subs="attributes,verbatim"] +---- +import mill._, scalalib._ + +object foo extends ScalaModule { + def consoleScalacOptions = scalacOptions().filterNot(o => o == "-Xfatal-warnings") +} +---- + +To use the `repl`, you can (and sometimes need to) customize the Ammonite version to work with your selected Scala version. 
+Mill provides a default Ammonite version, +but depending on the Scala version you are using, there may be no matching Ammonite release available. +In order to start the repl, you may have to specify a different available Ammonite version. + +.Example: Overriding `ammoniteVersion` to select a release compatible to the `scalaVersion` +[source,scala,subs="attributes,verbatim"] +---- +import mill._, scalalib._ + +object foo extends ScalaModule { + def scalaVersion = "2.12.6" + def ammoniteVersion = "2.4.0" +} +---- + +[TIP] +-- +_Why is Ammonite tied to the exact Scala version?_ + +This is because Ammonite depends on the Scala compiler. +In contrast to the Scala library, compiler releases do not guarantee any binary compatibility between releases. +As a consequence, Ammonite needs full Scala version specific releases. + +The older your used Mill version or the newer the Scala version you want to use, the higher is the risk that the default Ammonite version will not match. +-- + +== Disabling incremental compilation with Zinc + +By default all ``ScalaModule``s use incremental compilation via https://github.com/sbt/zinc[Zinc] to +only recompile sources that have changed since the last compile, or ones that have been invalidated +by changes to upstream sources. 
+ +If for any reason you want to disable incremental compilation for a module, you can override and set +`zincIncrementalCompilation` to `false` + +.`build.mill` +[source,scala,subs="attributes,verbatim"] +---- +import mill._, scalalib._ + +object foo extends ScalaModule { + def zincIncrementalCompilation = false +} +---- diff --git a/docs/modules/ROOT/pages/scalalib/native-examples.adoc b/docs/modules/ROOT/pages/scalalib/native-examples.adoc new file mode 100644 index 00000000000..90d11b01714 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/native-examples.adoc @@ -0,0 +1,30 @@ += Scala Native Examples +:page-aliases: Scala_Native_Examples.adoc + +++++ + +++++ + + +This page contains examples of using Mill as a build tool for scala-native applications. +It covers setting up a basic scala-native application that calls a C function within it, +as well as an example of two modules with a scala-native application. + +== Simple + +include::partial$example/scalalib/native/1-simple.adoc[] + +== Interop + +include::partial$example/scalalib/native/2-interop.adoc[] + +== Multi-Module + +include::partial$example/scalalib/native/3-multi-module.adoc[] + +== Common Config + +include::partial$example/scalalib/native/4-common-config.adoc[] 
`jpackage` + +Mill has builtin support for the https://docs.oracle.com/en/java/javase/11/tools/jlink.html[JLink] and +https://docs.oracle.com/en/java/javase/17/docs/specs/man/jpackage.html[JPackage] command line tools. +For more details, see: + +* xref:javalib/publishing.adoc#_java_app_and_bundles_using_jlink[Java App and Bundles using JLink] +* xref:javalib/publishing.adoc#_java_installers_using_jpackage[Java Installers using JPackage] + + +== Building Native Image with Graal VM + +include::partial$example/scalalib/publishing/7-native-image.adoc[] diff --git a/docs/modules/ROOT/pages/scalalib/testing.adoc b/docs/modules/ROOT/pages/scalalib/testing.adoc new file mode 100644 index 00000000000..8cccab792b7 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/testing.adoc @@ -0,0 +1,29 @@ += Testing Scala Projects +:page-aliases: Testing_Scala_Projects.adoc + +include::partial$gtag-config.adoc[] + +This page will discuss common topics around working with test suites using the Mill build tool + +== Defining Unit Test Suites + +include::partial$example/scalalib/testing/1-test-suite.adoc[] + + +== Test Dependencies + +include::partial$example/scalalib/testing/2-test-deps.adoc[] + +== Defining Integration Test Suites + +include::partial$example/scalalib/testing/3-integration-suite.adoc[] + +== Test Grouping + +include::partial$example/scalalib/testing/4-test-grouping.adoc[] + +== Github Actions Test Reports + +If you use Github Actions for CI, you can use https://github.com/mikepenz/action-junit-report in +your pipeline to render the generated `test-report.xml` files nicely on Github. 
See +https://github.com/com-lihaoyi/mill/pull/4218/files for an example integration \ No newline at end of file diff --git a/docs/modules/ROOT/pages/scalalib/web-examples.adoc b/docs/modules/ROOT/pages/scalalib/web-examples.adoc new file mode 100644 index 00000000000..f448f3b4bf8 --- /dev/null +++ b/docs/modules/ROOT/pages/scalalib/web-examples.adoc @@ -0,0 +1,42 @@ += Scala Web Project Examples +:page-aliases: Web_Build_Examples.adoc, Scala_Web_Examples.adoc + +include::partial$gtag-config.adoc[] + + +This page contains examples of using Mill as a build tool for web-applications. +It covers setting up a basic backend server, Todo-MVC app, topics like cache +busting, as well as usage of Scala.js both as standalone modules as well as +integrated with your backend Scala-JVM web server. + +== TodoMVC Web App + +include::partial$example/scalalib/web/1-todo-webapp.adoc[] + +== Webapp Cache Busting + +include::partial$example/scalalib/web/2-webapp-cache-busting.adoc[] + +== Scala.js Modules + +include::partial$example/scalalib/web/3-scalajs-module.adoc[] + +== Scala.js Webserver Integration + +include::partial$example/scalalib/web/4-webapp-scalajs.adoc[] + +== Scala.js/Scala-JVM Code Sharing + +include::partial$example/scalalib/web/5-webapp-scalajs-shared.adoc[] + +== Publishing Cross-Platform Scala Modules + +include::partial$example/scalalib/web/6-cross-version-platform-publishing.adoc[] + +== Publishing Cross-Platform Scala Modules Alternative + +include::partial$example/scalalib/web/7-cross-platform-version-publishing.adoc[] + +== Scala.js WebAssembly Example + +include::partial$example/scalalib/web/8-wasm.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/partials/Installation_IDE_Support.adoc b/docs/modules/ROOT/partials/Installation_IDE_Support.adoc deleted file mode 100644 index d8a781f14ab..00000000000 --- a/docs/modules/ROOT/partials/Installation_IDE_Support.adoc +++ /dev/null @@ -1,369 +0,0 @@ -++++ - - -++++ - -For all the examples in this 
documentation, there is a `download` link that provides -a zip file containing the full example ready to use. These examples come with a `./mill` -script you can use to immediately begin working with the project, needing only a JVM installed -globally - -The best method of installing Mill is to just install a <<_bootstrap_scripts,bootstrap script>>. -This script can determine the best version to be used by a project (e.g. by -reading a `.mill-version` or `.config/mill-version` file or a `MILL_VERSION` environment variable) and will use this exact Mill version. -If the determined Mill version is not installed locally, it will be downloaded automatically. - -== IDE Support - -:link-metals: https://scalameta.org/metals/ - -Mill supports IntelliJ and VSCode, both via the standard -https://build-server-protocol.github.io/[Build Server Protocol] - -=== IntelliJ - -To use Mill with IntelliJ, first ensure you have the free -https://plugins.jetbrains.com/plugin/1347-scala[IntelliJ Scala Plugin] -installed. This is necessary as Mill build files are written in Scala, -even if you are using it to build a Java project. - -Once you have the plugin installed, you can use IntelliJ to open any project -containing a Mill `build.sc` file, and IntelliJ will automatically load the -Mill build. This will provide support both for your application code, -as well as the code in the `build.sc`: - -image::IntellijApp.png[] - -image::IntellijBuild.png[] - -If you make changes to your Mill `build.sc`, you can ask Intellij to load -those updates by opening the "BSP" tab and clicking the "Refresh" button - -image::IntellijRefresh.png[] - -==== IntelliJ IDEA XML Support - -Apart from using the Build Server Protocol, you can also generate IDEA project -files directly with Mill. This is probably the preferred way if you work on -polyglot projects and need support for other languages like Kotlin or AspectJ, -which are currently not specifically configured over BSP. 
- -To generate IntelliJ IDEA project files into `.idea/`, run: - -[source,bash] ----- -./mill mill.idea.GenIdea/idea ----- - -This will generate the XML files IntelliJ uses to configure your project - -``` -.idea -.idea/scala_settings.xml -.idea/mill_modules -.idea/mill_modules/.iml -.idea/mill_modules/mill-build.iml -.idea/mill_modules/test.iml -.idea/libraries -.idea/libraries/mill_scalalib_2_13_0_11_10_jar.xml -... -.idea/workspace.xml -.idea/modules.xml -.idea/scala_compiler.xml -.idea/misc.xml -``` - -After the files are generated, you can open the folder in IntelliJ to load the project -into your IDE. If you make changes to your Mill `build.sc`, you can update the project config -those updates by running `./mill mill.idea.GenIdea/idea` again. - -=== VSCode - -To use Mill with VSCode, first ensure you have the free -https://marketplace.visualstudio.com/items?itemName=scalameta.metals[Metals VSCode Scala language server] -installed. This is necessary as Mill build files are written in Scala, -even if you are using it to build a Java project. - -Once you have the language server installed, you can ask VSCode to open any folder -containing a Mill `build.sc` file, and VSCode will ask you to import your -Mill build. This will provide support both for your application code, -as well as the code in the `build.sc`: - -image::VSCodeApp.png[] - -image::VSCodeBuild.png[] - -If you make changes to your Mill `build.sc`, you can ask VSCode to load -those updates by opening the "BSP" tab and clicking the "Refresh" button - -image::VSCodeRefresh.png[] - -=== Debugging IDE issues - -In case things go wrong, it can be sometimes hard to find the cause. -BSP is just a protocol, and as such, issues on the server side (the Mill BSP -server) might not well propagated to the BSP client (your IDE). - -For that reason Mill BSP server is writing a log file under -`.bsp/mill-bsp.stderr`, where you can find various information about what's -going on. 
Its content should look familiar, as it contains regular Mill -output accompanied by additional BSP client-server communication details. - -You can increase the verbosity of that log file, when you run Mill with -`--debug` at installation time (of the BSP discovery file). - -[source,bash] ----- -mill --debug mill.bsp.BSP/install ----- - -=== BSP Gotchas: Mismatching JVM versions - -A common issue for poor performance can be a mismatch of the JVMs. -In such a case the Mill BSP server started by a BSP client like Metals is using a _different_ JVM than `mill` is using when started from the command line. -In such a scenario, every Mill invocation using the other JVM will inadvertently invalidate Mills target caches. -This effectively leads to full reevaluation of all invoked Mill targets and appears as "bad performance". - -To detect if this is the case, - -1. import the project in the BSP client. -2. Wait until the BSP import process has finished. -3. Then run `mill __.compile` in the terminal. -4. Now watch the IDE, to see if the compile command invoked in the terminal has triggered compilation on the build server. - -If this happens, you're using different JVMs. -Unfortunately, this is rather difficult to "defend" programmatically in Mill itself. -It is an explicit design goal of Mill, that it should work in different environments. -It is evident and likely unintentional that you have two conflicting local environments. - -To fix this it's required to find where this difference stems from. -As a starting point, - -* Find out which JVM is used for Mill on the CLi. `mill --version` reports its JVM. - -* Search the `.bsp/mill-bsp.stderr` file for a line starting with `"Updating Evaluator"`. -It should contain all env variables (particularly `JAVA_HOME`) used by the BSP server. - -Once you found and fixed that and the environments are the same, `clean` and restarting BSP should work as expected. 
- -[#_bootstrap_scripts] -== Bootstrap Scripts - -Although the Mill example projects come with their own `./mill` bootstrap script, -you can also download it manually: - -[source,bash,subs="verbatim,attributes"] ----- -curl -L {mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag} > mill && chmod +x mill -echo {mill-last-tag} > .mill-version ----- - -Downloading a `mill` bootstrap script to the root of your project repository helps make it easier for -new contributors to build your project, as they don't have to install Mill before they can start. -Anyone who wants to work with the project can simply use the `./mill` script directly. - -[source,bash] ----- -./mill --version -./mill __.compile # double underscore ----- - -In general, bootstrap scripts are the recommended way of installing Mill. -Similar to `./gradlew` or `./mvnw`, the `./mill` bootstrap script -reduces the chance of errors due to the installed version of Mill -being incompatible with the version expected by your build. -In-project bootstrap scripts are also useful for running Mill in CI/CD, ensuring -that your build server like Jenkins or Github Actions has the correct version of Mill -present to build, compile or test your code. - -=== `millw` - -You can also use https://github.com/lefou/millw[lefou/millw] as a drop-in replacement for `mill`. -This script is a bit more advanced, and supports running on all major platforms including MS Windows. - -The script supports to following sources when determining the best Mill version to use: - -* `MILL_VERSION` environment variable -* `.mill-version` file in the project directory -* `.config/mill-version` file in the project directory -* if non was defined so far, it can also check for the latest available version online -* `DEFAULT_MILL_VERSION` environment variable - -Using a system-wide installed `millw` is probably the robustest way to use Mill on any operating system. 
-It also preserves support of rather old Mill versions next to recent ones, so that updating it or switching to `millw` initially should be a no-brainer. - -You can also install it on Homebrew via https://github.com/lefou/homebrew-millw[homebrew-millw]: - -[source,sh] ----- -brew install lefou/millw/millw ----- - - -== Updating Mill - -Typically, most Mill projects use a `.mill-version` file to configure what version -to use. You can update the version specified in this file in order to change the version -of Mill. The file path `.config/mill-version` is also supported. If neither is provided, -the `./mill` bootstrap script will use the `DEFAULT_MILL_VERSION` it has built in. - -To choose a different Mill version on an ad-hoc basis, e.g. for experimentation, you can pass -in a `MILL_VERSION` environment variable, e.g. - -[source,bash] ----- -MILL_VERSION=0.5.0-3-4faefb mill __.compile ----- - -or - -[source,bash] ----- -MILL_VERSION=0.5.0-3-4faefb ./mill __.compile ----- - -to override the Mill version manually. This takes precedence over the version -specified in `./mill`, `.config/mill-version` or `.mill-version` - -== Other installation methods - -Of course, you can also use the package manager of your operating system or distribution. -Please keep in mind, that all those solutions listed below are maintained outside of Mill and may not have the same features as the <<_bootstrap_scripts,bootstrap scripts>>. - -CAUTION: Some of the installations via package managers install a fixed version of Mill and do not support project-specific selection of the preferred Mill version. If you want to use the `MILL_VERSION` environment variable or need support for `.mill-version` or `.config/mill-version` files to control the actual used Mill version, please use a <<_bootstrap_scripts,Bootstrap script>> instead. 
- -=== OS X - -Installation via https://github.com/Homebrew/homebrew-core/blob/master/Formula/mill.rb[homebrew]: - -[source,sh] ----- -brew install mill ----- - - -=== Arch Linux - -Arch Linux has an https://archlinux.org/packages/extra/any/mill/[Extra package for mill]: - -[source,bash] ----- -pacman -S mill - ----- - -=== FreeBSD - -Installation via http://man.freebsd.org/pkg/8[pkg(8)]: - -[source,sh] ----- -pkg install mill - ----- - -=== Gentoo Linux - -[source,sh] ----- -emerge dev-java/mill-bin - ----- - -=== Windows - -To get started, download Mill from -{mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag}-assembly[Github releases], and save it as `mill.bat`. - -If you're using https://scoop.sh[Scoop] you can install Mill via - -[source,bash] ----- -scoop install mill ----- - -=== WSL / MSYS2 / Cycgin / Git-Bash - -Mill also works on "sh" environments on Windows (e.g., -https://www.msys2.org[MSYS2], -https://www.cygwin.com[Cygwin], -https://gitforwindows.org[Git-Bash], -https://docs.microsoft.com/en-us/windows/wsl[WSL]); to get started, follow the instructions in the <<_manual>> -section. Note that: - -* In some environments (such as WSL), Mill might have to be run without a server (using `-i`, `--interactive`, or `--no-server`.) 
- -* On Cygwin, run the following after downloading mill: - -[source,bash] ----- -sed -i '0,/-cp "\$0"/{s/-cp "\$0"/-cp `cygpath -w "\$0"`/}; 0,/-cp "\$0"/{s/-cp "\$0"/-cp `cygpath -w "\$0"`/}' /usr/local/bin/mill ----- - -==== Docker - -You can download and run -a https://hub.docker.com/r/nightscape/scala-mill/["Docker image containing OpenJDK, Scala and Mill"] using - -[source,bash] ----- -docker pull nightscape/scala-mill -docker run -it nightscape/scala-mill ----- - -=== Manual - -To get started, download Mill and install it into your HOME ".local/bin" via the following -`curl`/`chmod` command: - -[source,bash,subs="verbatim,attributes"] ----- -sh -c "curl -L {mill-github-url}/releases/download/{mill-last-tag}/{mill-last-tag} > ~/.local/bin/mill && chmod +x ~/.local/bin/mill" ----- - -=== Coursier (unsupported) - -Installing mill via `coursier` or `cs` is currently not officially supported. -There are various issues, especially with interactive mode. - -=== Asdf (unsupported) - -You can install and manage Mill via the Multiple Runtime Version Manager - https://asdf-vm.com/[`asdf`]. - -Support by `asdf` is currently possible by using the https://github.com/asdf-community/asdf-mill[`asdf-mill` plugin]: - -.Steps to install the `mill` plugin and Mill with `asdf` -[source,bash] ---- -asdf plugin add mill -asdf install mill latest -asdf global mill latest ---- - - -=== Automatic Mill updates - -If your project is hosted on GitHub, GitLab, or Bitbucket, you can use -https://github.com/scala-steward-org/scala-steward[Scala Steward] to -automatically open a pull request to update your Mill version (in -`.mill-version` or `.config/mill-version` file), whenever there is a newer version available. - -TIP: Scala Steward can also -xref:Scala_Module_Config.adoc#_keeping_up_to_date_with_scala_steward[scan your project dependencies] -and keep them up-to-date. 
- -=== Development Releases - -In case you want to try out the latest features and improvements that are -currently in the main branch, unstable versions of Mill -are -https://github.com/com-lihaoyi/mill/releases[available] as binaries named -`+#.#.#-n-hash+` linked to the latest tag. - -The easiest way to use a development release is to use one of the -<<_bootstrap_scripts>>, which support <<_overriding_mill_versions>> via an -`MILL_VERSION` environment variable or a `.mill-version` or `.config/mill-version` file. - - diff --git a/docs/modules/ROOT/partials/Intro_Header.adoc b/docs/modules/ROOT/partials/Intro_Header.adoc new file mode 100644 index 00000000000..3ad1bf90e41 --- /dev/null +++ b/docs/modules/ROOT/partials/Intro_Header.adoc @@ -0,0 +1,9 @@ +This page contains a quick introduction to getting started with using Mill to build +a simple {language} program. We will walk through a series of Mill builds of increasing +complexity to show you the key features and usage of the Mill build tool. + +The other pages of this section on {language} go into more depth into individual features, +with more examples of how to use Mill for {language} and more details of how the Mill +build tool works. They aren't intended to be read comprehensively top-to-bottom, but +rather looked up when you have a particular interest e.g. in testing, linting, +publishing, and so on. 
\ No newline at end of file diff --git a/docs/modules/ROOT/partials/Intro_to_Mill_BlogVideo.adoc b/docs/modules/ROOT/partials/Intro_to_Mill_BlogVideo.adoc deleted file mode 100644 index 8c53832801b..00000000000 --- a/docs/modules/ROOT/partials/Intro_to_Mill_BlogVideo.adoc +++ /dev/null @@ -1,14 +0,0 @@ - -If you're interested in the fundamental ideas behind Mill, rather than the user-facing -benefits discussed above, the following presentation -and companion blog post is a good introduction into what the Mill build tool is -all about: - -- https://www.youtube.com/watch?v=UsXgCeU-ovI&list=PLLMLOC3WM2r6ZFhFfVH74W-sl8LfWtOEc&index=15[Video: A Deep Dive into the Mill Build Tool] -- https://www.lihaoyi.com/post/SoWhatsSoSpecialAboutTheMillScalaBuildTool.html[Blog Post: What's So Special About The Mill Scala Build Tool?] - -The rest of this page contains a quick introduction to getting start with using -Mill to build a simple {language} program. The other pages of this doc-site go into -more depth, with more examples of how to use Mill and more details of how the -Mill build tool works. - diff --git a/docs/modules/ROOT/partials/Intro_to_Mill_Footer.adoc b/docs/modules/ROOT/partials/Intro_to_Mill_Footer.adoc deleted file mode 100644 index dc851ab5795..00000000000 --- a/docs/modules/ROOT/partials/Intro_to_Mill_Footer.adoc +++ /dev/null @@ -1,110 +0,0 @@ - -== Watch and Re-evaluate - -You can use the `--watch` flag to make Mill watch a task's inputs, -re-evaluating the task as necessary when the inputs -change: - -[source,bash] ----- -$ mill --watch foo.compile -$ mill --watch foo.run -$ mill -w foo.compile -$ mill -w foo.run ----- - -Mill's `--watch` flag watches both the files you are building using Mill, as -well as Mill's own `build.mill` file and anything it imports, so any changes to -your `build.mill` will automatically get picked up. 
- -For long-running processes like web servers, you can use `runBackground` to make sure they recompile and restart when code changes, -forcefully terminating the previous process even though it may be still alive: - -[source,bash] ----- -$ mill -w foo.compile -$ mill -w foo.runBackground ----- - - -== Parallel Task Execution - -By default, mill will evaluate all tasks in parallel, with the number of concurrent -tasks equal to the number of cores on your machine. - -You can use the `--jobs` (`-j`) to configure explicitly how many concurrent tasks you -wish to run - -Example: Use up to 4 parallel threads to compile all modules: - -[source,bash] ----- -mill -j 4 __.compile ----- - -To disable parallel execution use `--jobs 1`. - -`mill` generates an output file in `out/mill-chrome-profile.json` that can be -loaded into the Chrome browser's `chrome://tracing` page for visualization. -This can make it much easier to analyze your parallel runs to find out what's -taking the most time: - -image::ChromeTracing.png[ChromeTracing.png] - -Please note that the maximal possible parallelism depends on your project. -Tasks that depend on each other can't be processed in parallel. - -== Command-line usage - -Mill is a command-line tool and supports various options. - -Run `mill --help` for a complete list of options - -.Output of `mill --help` -[source,subs="verbatim,attributes"] ----- -Mill Build Tool, version {mill-version} -usage: mill [options] [[target [target-options]] [+ [target ...]]] - -D --define Define (or overwrite) a system property. - -b --bell Ring the bell once if the run completes successfully, twice if - it fails. - --bsp Enable BSP server mode. - --color Enable or disable colored output; by default colors are enabled - in both REPL and scripts mode if the console is interactive, and - disabled otherwise. 
- -d --debug Show debug output on STDOUT - --disable-callgraph-invalidation Disable the fine-grained callgraph-based target invalidation in - response to code changes, and instead fall back to the previous - coarse-grained implementation relying on the script `import - $file` graph - --disable-ticker Disable ticker log (e.g. short-lived prints of stages and - progress bars). - --enable-ticker Enable ticker log (e.g. short-lived prints of stages and - progress bars). - -h --home (internal) The home directory of internally used Ammonite script - engine; where it looks for config and caches. - --help Print this help message and exit. - -i --interactive Run Mill in interactive mode, suitable for opening REPLs and - taking user input. This implies --no-server and no mill server - will be used. Must be the first argument. - --import Additional ivy dependencies to load into mill, e.g. plugins. - -j --jobs Allow processing N targets in parallel. Use 1 to disable - parallel and 0 to use as much threads as available processors. - -k --keep-going Continue build, even after build failures. - --meta-level Experimental: Select a meta-build level to run the given - targets. Level 0 is the normal project, level 1 the first - meta-build, and so on. The last level is the built-in synthetic - meta-build which Mill uses to bootstrap the project. - --no-server Run Mill in single-process mode. In this mode, no Mill server - will be started or used. Must be the first argument. - -s --silent Make ivy logs during script import resolution go silent instead - of printing; though failures will still throw exception. - -v --version Show mill version information and exit. - -w --watch Watch and re-run your scripts when they change. - target ... The name or a pattern of the target(s) you want to build, - followed by any parameters you wish to pass to those targets. To - specify multiple target names or patterns, use the `+` - separator. 
----- - -All _options_ must be given before the first target. diff --git a/docs/modules/ROOT/partials/Intro_to_Mill_Header.adoc b/docs/modules/ROOT/partials/Intro_to_Mill_Header.adoc deleted file mode 100644 index b0d6d9fe5f5..00000000000 --- a/docs/modules/ROOT/partials/Intro_to_Mill_Header.adoc +++ /dev/null @@ -1,54 +0,0 @@ -[graphviz] -.... -digraph G { - rankdir=LR - node [shape=box width=0 height=0 style=filled fillcolor=white] - bgcolor=transparent - newrank=true; - subgraph cluster_0 { - style=dashed - node [shape=box width=0 height=0 style=filled fillcolor=white] - label = "foo"; - - "foo.sources" -> "foo.compile" -> "foo.classPath" -> "foo.assembly" - "foo.mainClass" -> "foo.assembly" - } - subgraph cluster_1 { - style=dashed - node [shape=box width=0 height=0 style=filled fillcolor=white] - label = "bar"; - - "foo.classPath" -> "bar.compile" - "foo.classPath" -> "bar.classPath" - "bar.sources" -> "bar.compile" -> "bar.classPath" -> "bar.assembly" - "bar.mainClass" -> "bar.assembly" - } -} -.... - -{mill-github-url}[Mill] is a fast multi-language build tool that supports {language}, making your -common development workflows xref:Case_Study_Mill_vs_Maven.adoc[5-10x faster to Maven], or -xref:Case_Study_Mill_vs_Gradle.adoc[2-4x faster than Gradle], with -xref:Case_Study_Mill_vs_SBT.adoc[an easier experience than SBT]. -Mill aims to make your JVM project's build process performant, maintainable, and flexible -even as it grows from a small project to a large codebase or monorepo with hundreds of modules: - -* *Performance*: Mill's xref:Tasks.adoc[build graph] automatically - xref:The_Mill_Evaluation_Model.adoc#_caching_at_each_layer_of_the_evaluation_model[caches] - and xref:#_parallel_task_execution[parallelizes] build - tasks, keeping your workflows fast and responsive. 
Mill adds minimal overhead over - the logic necessary to build your project, while providing tools to let you identify - and resolve bottlenecks in your build - -* *Maintainability*: Mill config and custom logic is written in - xref:_custom_build_logic[custom build logic] in concise type-checked code, - rather than shell scripts, XML or YAML. This lets IDEs - (xref:{language}_Installation_IDE_Support.adoc#_intellij[IntelliJ] or - xref:{language}_Installation_IDE_Support.adoc#_vscode[VSCode]) - "jump-to-definition" to navigate around your build as easily as any application codebase. - -* *Flexibility*: Mill's custom tasks and modules allow anything from - xref:Tasks.adoc#primitive-tasks[adding simple pipelines], up to - xref:Modules.adoc#_use_case_diy_java_modules[entire language toolchains]. - This means Mill can be easily customized to fit exactly what you need without being beholden - to third-party plugins which may not work well together or be well maintained. diff --git a/docs/modules/ROOT/partials/Publishing_Footer.adoc b/docs/modules/ROOT/partials/Publishing_Footer.adoc new file mode 100644 index 00000000000..50f02c296a8 --- /dev/null +++ b/docs/modules/ROOT/partials/Publishing_Footer.adoc @@ -0,0 +1,152 @@ +== Publishing to Sonatype Maven Central + +Once you've mixed in `PublishModule`, apart from publishing locally, you can also publish +your project's modules to maven central + +=== GPG + +If you've never created a keypair before that can be used to sign your artifacts +you'll need to do this. https://central.sonatype.org/publish/requirements/gpg/[Sonatype's GPG Documentation] +has the instructions on how to do this + +=== Publishing Secrets + +Mill uses the following environment variables as a way to pass the necessary secrets +for publishing: + + +[source,bash] +---- +# The LHS and RHS of the User Token, accessible through the sonatype +# website `Profile` / `User Token` / `Access User Token` +export MILL_SONATYPE_USERNAME=... 
+export MILL_SONATYPE_PASSWORD=... + +# The base-64 encoded PGP key, which can be encoded in the following way +# for each OS: +# +# MacOS or FreeBSD +# gpg --export-secret-key -a $LONG_ID | base64 +# +# Ubuntu (assuming GNU base64) +# gpg --export-secret-key -a $LONG_ID | base64 -w0 +# +# Arch +# gpg --export-secret-key -a $LONG_ID | base64 | sed -z 's;\n;;g' +# +# Windows +# gpg --export-secret-key -a %LONG_ID% | openssl base64 +export MILL_PGP_SECRET_BASE64=... + +# The passphrase associated with your PGP key +export MILL_PGP_PASSPHRASE=... +---- + +=== Publishing + +You can publish all eligible modules in your Mill project using +the xref:fundamentals/modules.adoc#_default_tasks[default task] of the +xref:fundamentals/modules.adoc#_external_modules[External Module] `mill.scalalib.PublishModule`: + +[source,bash] +---- +mill mill.scalalib.PublishModule/ +---- + +You can also specify individual modules you want to publish via a selector: + +[source,bash] +---- +mill mill.scalalib.PublishModule/ foo.publishArtifacts +---- + +The default URL for publishing to sonatype's Maven Central is `oss.sonatype.org`. +Newer projects registered on sonatype may need to publish using `s01.oss.sonatype.org`. +In that case, you can pass in a `--sonatypeUri`: + +[source,bash] +---- +mill mill.scalalib.PublishModule/ \ + --sonatypeUri https://s01.oss.sonatype.org/service/local +---- + +This also allows you to publish to your own internal corporate sonatype deployment, +by passing in `--sonatypeUri example.company.com` instead. + +[NOTE] +-- +Since Feb. 2021 any new Sonatype accounts have been created on +`s01.oss.sonatype.org`, so you'll want to ensure you set the relevant URIs to match. + +The symptom of using the "wrong" URL for publishing is typically a 403 error code, in response to the publish request. 
+ +See https://central.sonatype.org/publish/publish-guide/#releasing-to-central + +Typically + +* `https://s01.oss.sonatype.org/service/local` - for the `--sonatypeUri` +* `https://s01.oss.sonatype.org/content/repositories/snapshots` - for `sonatypeSnapshotUri` + +-- +=== Publishing Using Github Actions + + +To publish on Github Actions, you can use something like this: + +```yaml +# .github/workflows/publish-artifacts.yml +name: Publish Artifacts +on: + push: + tags: + - '**' + workflow_dispatch: +jobs: + publish-artifacts: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-java@v3 + with: + distribution: 'temurin' + java-version: '17' + - run: ./mill mill.scalalib.PublishModule/ + env: + MILL_PGP_PASSPHRASE: ${{ secrets.MILL_PGP_PASSPHRASE }} + MILL_PGP_SECRET_BASE64: ${{ secrets.MILL_PGP_SECRET_BASE64 }} + MILL_SONATYPE_PASSWORD: ${{ secrets.MILL_SONATYPE_PASSWORD }} + MILL_SONATYPE_USERNAME: ${{ secrets.MILL_SONATYPE_USERNAME }} +``` + +Where `MILL_PGP_PASSPHRASE`, `MILL_PGP_SECRET_BASE64`, `MILL_SONATYPE_PASSWORD`, and +`MILL_SONATYPE_USERNAME` configured for the repository's or organization's Github Actions +workflows. See +https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions[Using Secrets in Github Actions] +for more details. + +=== Non-Staging Releases (classic Maven uploads) + +If the site does not support staging releases as `oss.sonatype.org` and `s01.oss.sonatype.org` do (for +example, a self-hosted OSS nexus site), you can pass in the +`--stagingRelease false` option to simply upload release artifacts to corresponding +maven path under `sonatypeUri` instead of staging path. 
+ +[source,bash] +---- +mill mill.scalalib.PublishModule/ \ + foo.publishArtifacts \ + lihaoyi:$SONATYPE_PASSWORD \ + --sonatypeUri http://example.company.com/release \ + --stagingRelease false +---- + + +== Publishing to other repositories + +While Sonatype Maven Central is the default publish repository for JVM ecosystem projects, +there are also others that you can use. Mill supports these largely through contrib plugins: + +* xref:contrib/codeartifact.adoc[] +* xref:contrib/artifactory.adoc[] +* xref:contrib/bintray.adoc[] +* xref:contrib/sonatypecentral.adoc[] \ No newline at end of file diff --git a/docs/modules/ROOT/partials/gtag-config.adoc b/docs/modules/ROOT/partials/gtag-config.adoc new file mode 100644 index 00000000000..6f6f521b0e4 --- /dev/null +++ b/docs/modules/ROOT/partials/gtag-config.adoc @@ -0,0 +1,22 @@ +++++ + + +++++ diff --git a/docs/package.mill b/docs/package.mill index 07b939d726b..13e706d0ef6 100644 --- a/docs/package.mill +++ b/docs/package.mill @@ -1,16 +1,29 @@ package build.docs +import org.jsoup._ import mill.util.Jvm import mill._, scalalib._ -import build.contrib import de.tobiasroeser.mill.vcs.version.VcsVersion +import scala.jdk.CollectionConverters._ + /** Generates the mill documentation with Antora. */ object `package` extends RootModule { // This module isn't really a ScalaModule, but we use it to generate // consolidated documentation using the Scaladoc tool. object site extends UnidocModule { + def unidocCompileClasspath = + super.unidocCompileClasspath().filter { ref => + // Workaround for https://github.com/scala/bug/issues/10028 + // We exclude the JAR of coursier/dependency here, so that + // scaladoc doesn't create an entry for its `dependency` + // package object in the Mill scaladoc. 
+ !ref.path.lastOpt.exists(_.startsWith("dependency_2.13-")) + } def scalaVersion = build.Deps.scalaVersion - def moduleDeps = build.millInternal.modules.collect { case m: build.MillStableScalaModule => m } - def unidocSourceUrl = T { + def moduleDeps = build.millInternal.modules.collect { + case m: build.MillStableScalaModule => m + case m: JavaModule if m eq build.kotlinlib => m + } + def unidocSourceUrl = Task { val sha = VcsVersion.vcsState().currentRevision Some(s"${build.Settings.projectUrl}/blob/$sha") } @@ -23,12 +36,12 @@ object `package` extends RootModule { Jvm.runSubprocess( commandArgs = Seq( npmExe, + "--no-audit", "install", "@antora/cli@3.1.9", "@antora/site-generator-default@3.1.9", "gitlab:antora/xref-validator", "@antora/lunr-extension@v1.0.0-alpha.6", - "asciidoctor-kroki@0.18.1" ), envArgs = Map(), workingDir = npmDir @@ -36,7 +49,7 @@ object `package` extends RootModule { } def runAntora(npmDir: os.Path, workDir: os.Path, args: Seq[String])(implicit - ctx: mill.api.Ctx.Log + ctx: mill.api.Ctx.Log ) = { prepareAntora(npmDir) @@ -53,7 +66,7 @@ object `package` extends RootModule { def source0 = T.source(millSourcePath) def projectReadme = T.source(T.workspace / "readme.adoc") - def source = T { + def source = Task { os.copy(source0().path, T.dest, mergeFolders = true) val pagesWd = T.dest / "modules" / "ROOT" / "pages" @@ -70,7 +83,7 @@ object `package` extends RootModule { for ((name, pref) <- renderedExamples) os.copy( pref.path, - pagesWd / "example" / os.SubPath(s"$name.adoc"), + partialsWd / "example" / os.SubPath(s"$name.adoc"), createFolders = true ) @@ -86,35 +99,96 @@ object `package` extends RootModule { createFolders = true ) + expandDiagramsInDirectoryAdocFile(T.dest, mill.main.VisualizeModule.classpath().map(_.path)) + PathRef(T.dest) } + def expandDiagramsInDirectoryAdocFile(dest: os.Path, + visualizeClassPath: Agg[os.Path]) + (implicit ctx: mill.api.Ctx) = { + + // Walk all files to render graphviz templates ourselves because the 
only Antora graphviz + // plugin (Kroki) relies on an online web service that is super slow and flaky + def walkAllFiles(inputs: Map[(os.Path, Int), String]): Map[(os.Path, Int), String] = { + val output = collection.mutable.Map.empty[(os.Path, Int), String] + for (p <- os.walk(dest) if p.ext == "adoc"){ + val outputLines = collection.mutable.ArrayDeque.empty[String] + val graphvizLines = collection.mutable.ArrayDeque.empty[String] + var isGraphViz = false + var isGraphViz0 = false + + for((line, i) <- os.read.lines(p).zipWithIndex){ + line match{ + case "[graphviz]" => isGraphViz0 = true + case "...." if isGraphViz0 => isGraphViz0 = false; isGraphViz = true + case "```graphviz" => isGraphViz = true + case "```" | "...." if isGraphViz => + isGraphViz = false + if (inputs.isEmpty) output((p, i)) = graphvizLines.mkString("\n") + else { + outputLines.append("++++") + outputLines.append("
") + outputLines.append(inputs((p, i))) + outputLines.append("
") + outputLines.append("++++") + } + + graphvizLines.clear() + case _ => + if (isGraphViz) graphvizLines.append(line) + else outputLines.append(line) + } + } + if (inputs.nonEmpty) os.write.over(p, outputLines.mkString("\n")) + } + output.toMap + } + + val diagrams = walkAllFiles(Map()) + // Batch the rendering so later it can be done in one call to a single subprocess, + // minimizing per-subprocess overhead needed to spawn them over and over + val orderedDiagrams = diagrams.toSeq.map{case ((p, i), s) => (p, i, os.temp(s), os.temp.dir())} + + mill.util.Jvm.runSubprocess( + "mill.main.graphviz.GraphvizTools", + visualizeClassPath, + mainArgs = orderedDiagrams.map{case (p, i, src, dest) => s"$src;$dest;svg"} + ) + + walkAllFiles(orderedDiagrams.map{case (p, i, src, dest) => ((p, i), os.read(dest / "out.svg"))}.toMap) + + } + def supplementalFiles = T.source(millSourcePath / "supplemental-ui") /** - * The doc root ready to be build by antora for the current branch. + * The doc root ready to be built by antora for the current branch. 
*/ - def devAntoraSources: T[PathRef] = T { + def devAntoraSources: T[PathRef] = Task { val dest = T.dest os.copy(source().path, dest, mergeFolders = true) - sanitizeAntoraYml(dest, "master", build.millVersion(), build.millLastTag()) + sanitizeAntoraYml( + dest, + "main-branch", + "main-branch", + build.millLastTag() + ) PathRef(dest) } def sanitizeAntoraYml( - dest: os.Path, - version: String, - millVersion: String, - millLastTag: String - ): Unit = { - val isPreRelease = (millVersion != millLastTag) || Seq("-M", "-RC").exists(millVersion.contains) + dest: os.Path, + version: String, + displayVersion: String, + millLastTag: String + ): Unit = { + val isPreRelease = version == "main-branch" || Seq("-M", "-RC").exists(version.contains) + val preReleaseSuffix = if (isPreRelease) "\nprerelease: true" else "" val lines = os.read(dest / "antora.yml").linesIterator.map { case s"version:$_" => - if (isPreRelease) - s"version: '${version}'\ndisplay-version: '${millVersion}'\nprerelease: true" - else - s"version: '${version}'\ndisplay-version: '${millVersion}'" - case s" mill-version:$_" => s" mill-version: '$millVersion'" + s"version: '$version'\ndisplay-version: '$displayVersion'$preReleaseSuffix" + case s" mill-version:$_" => s" mill-version: '$millLastTag'" case s" mill-last-tag:$_" => s" mill-last-tag: '$millLastTag'" case l => l } @@ -126,16 +200,23 @@ object `package` extends RootModule { os.write.over(dest / "antora.yml", (lines ++ newLines).mkString("\n")) } + def blogFolder0 = Task.Source(build.millSourcePath / "blog") + def blogFolder = Task{ + os.copy(blogFolder0().path, Task.dest, mergeFolders = true) + expandDiagramsInDirectoryAdocFile(Task.dest, mill.main.VisualizeModule.classpath().map(_.path)) + PathRef(Task.dest) + } def githubPagesPlaybookText(authorMode: Boolean) = T.task { extraSources: Seq[os.Path] => val taggedSources = for (path <- extraSources) yield { s""" - url: ${build.baseDir} | start_path: ${path.relativeTo(build.baseDir)} + | edit_url: false 
|""".stripMargin } s"""site: - | title: The Mill Build Tool + | title: The Mill JVM Build Tool | url: ${if (authorMode) s"${T.dest}/site" else build.Settings.docUrl} - | start_page: mill::Java_Intro_to_Mill.adoc + | start_page: mill::index.adoc | keys: | google_analytics: 'G-1C582ZJR85' | @@ -143,13 +224,17 @@ object `package` extends RootModule { | sources: | - url: ${if (authorMode) build.baseDir else build.Settings.projectUrl} | branches: [] - | tags: ${build.Settings.legacyDocTags.map("'" + _ + "'").mkString("[", ",", "]")} + | tags: ${build.Settings.legacyDocTags.filter(_ => !authorMode).map("'" + _ + "'").mkString("[", ",", "]")} + | edit_url: false | start_path: docs/antora | |${taggedSources.mkString("\n\n")} | | - url: ${build.baseDir} | start_path: ${devAntoraSources().path.relativeTo(build.baseDir)} + | + | - url: ${build.baseDir} + | start_path: ${blogFolder().path.relativeTo(build.baseDir)} |ui: | bundle: | url: https://gitlab.com/antora/antora-ui-default/-/jobs/artifacts/master/raw/build/ui-bundle.zip?job=bundle-stable @@ -163,9 +248,6 @@ object `package` extends RootModule { | utest-github-url: https://github.com/com-lihaoyi/utest | upickle-github-url: https://github.com/com-lihaoyi/upickle | mill-scip-version: ${build.Deps.DocDeps.millScip.dep.version} - | kroki-fetch-diagram: true - | extensions: - | - asciidoctor-kroki |antora: | extensions: | - require: '@antora/lunr-extension' @@ -178,36 +260,48 @@ object `package` extends RootModule { |""".stripMargin } - def oldDocSources = T { - for (oldVersion <- build.Settings.docTags) yield { - val checkout = T.dest / oldVersion + def oldDocSources: T[Seq[PathRef]] = Task { + val versionLabels = + build.Settings.docTags.map{v => + val xVersion = v.split('.').dropRight(1).mkString(".") + ".x" + (v, xVersion, xVersion) + } ++ + // Set the latest stable branch as the "master" docs that people default to + Seq((build.Settings.docTags.last, "master", s"latest-${build.Settings.docTags.last}")) + + for 
((millLastTag, version, displayVersion) <- versionLabels) yield { + val checkout = T.dest / displayVersion os.proc("git", "clone", T.workspace / ".git", checkout).call(stdout = os.Inherit) - os.proc("git", "checkout", oldVersion).call(cwd = checkout, stdout = os.Inherit) + os.proc("git", "checkout", millLastTag).call(cwd = checkout, stdout = os.Inherit) val outputFolder = checkout / "out" / "docs" / "source.dest" os.proc("./mill", "-i", "docs.source").call(cwd = checkout, stdout = os.Inherit) - sanitizeAntoraYml(outputFolder, oldVersion, oldVersion, oldVersion) + expandDiagramsInDirectoryAdocFile(outputFolder, mill.main.VisualizeModule.classpath().map(_.path)) + + sanitizeAntoraYml(outputFolder, version, displayVersion, millLastTag) PathRef(outputFolder) } } - def githubPages: T[PathRef] = T { + def githubPages: T[PathRef] = Task { generatePages(authorMode = false)().apply(oldDocSources().map(_.path)) } - def localPages = T { + def localPages: T[PathRef] = Task { val pages = generatePages(authorMode = true)().apply(oldDocSources().map(_.path)) T.log.outputStream.println( - s"You can browse the local pages at: ${(pages.path / "index.html").toNIO.toUri()}" + s"You can browse the pages at: ${(pages.path / "index.html").toNIO.toUri()}" ) + pages } - def fastPages = T { + def fastPages: T[PathRef] = Task { val pages = generatePages(authorMode = true)().apply(Nil) T.log.outputStream.println( - s"You can browse the local pages at: ${(pages.path / "index.html").toNIO.toUri()}" + s"You can browse the pages at: ${(pages.path / "index.html").toNIO.toUri()}" ) + pages } - def generatePages(authorMode: Boolean) = T.task { extraSources: Seq[os.Path] => + def generatePages(authorMode: Boolean) = Task.Anon { extraSources: Seq[os.Path] => T.log.errorStream.println("Creating Antora playbook ...") // dependency to sources source() @@ -247,11 +341,11 @@ object `package` extends RootModule { } def sanitizeDevUrls( - dir: os.Path, - sourceDir: os.Path, - newSourceDir: os.Path, - 
baseDir: os.Path - ): Unit = { + dir: os.Path, + sourceDir: os.Path, + newSourceDir: os.Path, + baseDir: os.Path + ): Unit = { val pathToRemove = sourceDir.relativeTo(baseDir).toString() val replacePath = newSourceDir.relativeTo(baseDir).toString() @@ -286,4 +380,130 @@ object `package` extends RootModule { } } } + + def allLinksAndAnchors: T[IndexedSeq[(os.Path, Seq[(String, String)], Seq[(String, String)], Set[String])]] = Task { + val base = fastPages().path + val validExtensions = Set("html", "scala") + for (path <- os.walk(base) if validExtensions(path.ext)) + yield { + val parsed = Jsoup.parse(os.read(path)) + val (remoteLinks, localLinks) = parsed + .select("a") + .asScala + .map(e => (e.toString, e.attr("href"))) + .toSeq + .filter(!_._2.startsWith("mailto:")) + .partition{case (e, l) => l.startsWith("http://") || l.startsWith("https://")} + ( + path, + remoteLinks, + localLinks.map{case (e, l) => (e, l.stripPrefix("file:"))}, + parsed.select("*").asScala.map(_.attr("id")).filter(_.nonEmpty).toSet, + ) + } + } + + def brokenRemoteLinks: T[Map[os.Path, Seq[(String, String, Int)]]] = Task{ + val allLinks = allLinksAndAnchors() + .flatMap { case (path, remoteLinks, localLinks, ids) => remoteLinks } + .map(_._2) + .filter{l => + // ignore example links since those are expected to be unresolved until + // a stable version is published and artifacts are uploaded to github + !l.contains("/example/") && + !l.contains("/releases/download/") && + // Ignore internal repo links in the changelog because there are a lot + // of them, and they're not very interesting to check and verify. 
+ !l.contains("https://github.com/com-lihaoyi/mill/pull/") && + !l.contains("https://github.com/com-lihaoyi/mill/milestone/") && + !l.contains("https://github.com/com-lihaoyi/mill/compare/") && + // Link meant for API configuration, not for clicking + !l.contains("https://s01.oss.sonatype.org/service/local") && + // Somehow this server doesn't respond properly to HEAD requests even though GET works + !l.contains("https://marketplace.visualstudio.com/items") + } + .toSet + + // Try to fetch all the links serially. It isn't worth trying to parallelize it + // because if we go too fast the remote websites tend to rate limit us anyway + val linksToStatusCodes = allLinks.toSeq.zipWithIndex + .map{ case (link, i) => + val key = s"$i/${allLinks.size}" + println(s"Checking link $link $key") + val start = System.currentTimeMillis() + val res = requests.head(link, check = false).statusCode + val duration = System.currentTimeMillis() - start + val remaining = 1000 - duration + if (remaining > 0) Thread.sleep(remaining) // try to avoid being rate limited + (link, res) + } + .toMap + + allLinksAndAnchors() + .map{case (path, remoteLinks, localLinks, ids) => + ( + path, + remoteLinks.collect{ + case (e, l) + if allLinks.contains(l) + && !linksToStatusCodes(l).toString.startsWith("2") => + (e, l, linksToStatusCodes(l)) + } + ) + } + .filter(_._2.nonEmpty) + .toMap + } + + + def checkBrokenLinks() = Task.Command{ + if (brokenLocalLinks().nonEmpty){ + throw new Exception("Broken Local Links: " + upickle.default.write(brokenLocalLinks(), indent = 2)) + } + // This is flaky due to rate limits so ignore it for now + + // if (brokenRemoteLinks().nonEmpty){ + // throw new Exception("Broken Remote Links: " + upickle.default.write(brokenRemoteLinks(), indent = 2)) + // } + } + + def brokenLocalLinks: T[Map[os.Path, Seq[(String, String)]]] = Task{ + val allLinksAndAnchors0 = allLinksAndAnchors() + val pathsToIds = allLinksAndAnchors0 + .map{case (path, remoteLinks, localLinks, ids) => 
(path, ids)} + .toMap + + val brokenLinksPerPath: Seq[(os.Path, Seq[(String, String)])] = + for { + (path, remoteLinks, localLinks, ids) <- allLinksAndAnchors0 + // Skip scaladoc files when scanning for broken links because some + // of those are scaladoc bugs outside of our control. + if !path.segments.contains("api") + } yield{ + ( + path, + localLinks.flatMap{case (elementString, url) => + val (baseUrl, anchorOpt) = url match { + case s"#$anchor" => (path.toString, Some(anchor)) + case s"$prefix#$anchor" => (prefix, Some(anchor)) + + case url => (url, None) + } + + val dest0 = os.Path(baseUrl, path / "..") + val possibleDests = Seq(dest0, dest0 / "index.html") + possibleDests.find(os.exists(_)) match{ + case None => Some((elementString, url)) + case Some(dest) => + anchorOpt.collect{case a if !pathsToIds.getOrElse(dest, Set()).contains(a) => (elementString, url)} + } + } + ) + } + + val nonEmptyBrokenLinksPerPath = brokenLinksPerPath + .filter{ case (path, items) => path.last != "404.html" && items.nonEmpty } + + nonEmptyBrokenLinksPerPath.toMap + } } diff --git a/docs/supplemental-ui/helpers/gt.js b/docs/supplemental-ui/helpers/gt.js new file mode 100644 index 00000000000..6fc61648716 --- /dev/null +++ b/docs/supplemental-ui/helpers/gt.js @@ -0,0 +1,4 @@ + +module.exports = (n1, n2, { data: { root } }) => { + return n1 > n2; +} \ No newline at end of file diff --git a/docs/supplemental-ui/partials/header-content.hbs b/docs/supplemental-ui/partials/header-content.hbs index 5a7440f428a..953b8867ea2 100644 --- a/docs/supplemental-ui/partials/header-content.hbs +++ b/docs/supplemental-ui/partials/header-content.hbs @@ -16,11 +16,12 @@